diff --git a/config/openshift/base/role.yaml b/config/openshift/base/role.yaml
index 9e55b70a68..4f79636159 100644
--- a/config/openshift/base/role.yaml
+++ b/config/openshift/base/role.yaml
@@ -396,3 +396,12 @@ rules:
- delete
- update
- patch
+# To read the cluster TLS security profile for centralized TLS configuration
+- apiGroups:
+ - config.openshift.io
+ resources:
+ - apiservers
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/docs/OpenShiftCentralizedTLSManagement.md b/docs/OpenShiftCentralizedTLSManagement.md
new file mode 100644
index 0000000000..fc37193622
--- /dev/null
+++ b/docs/OpenShiftCentralizedTLSManagement.md
@@ -0,0 +1,93 @@
+# Centralized TLS Configuration Support
+
+This change adds support for centralized TLS configuration from OpenShift's APIServer resource, enabling Tekton components to inherit TLS settings (minimum version, cipher suites, curve preferences) from the cluster-wide security policy.
+
+## Key Changes
+
+### 1. New Configuration Flag
+
+- Added `EnableCentralTLSConfig` boolean field to `TektonConfig.Spec.Platforms.OpenShift`
+- When enabled, TLS settings from the cluster's APIServer are automatically injected into supported components
+- Default: `false` (opt-in)
+
+### 2. APIServer Watcher
+
+- A single centralized watcher in the TektonConfig controller monitors the APIServer cluster resource
+- Uses a shared informer with 30-minute resync interval
+- When the APIServer TLS profile changes, the watcher enqueues TektonConfig for reconciliation
+
+### 3. Extension Interface Enhancement
+
+- Added `GetPlatformData() string` method to the Extension interface
+- Enables components to include platform-specific data in installer set hash computation
+- Triggers installer set updates when TLS configuration changes
+
+### 4. TektonResult Integration
+
+- First component to support centralized TLS configuration
+- Injects `TLS_MIN_VERSION`, `TLS_CIPHER_SUITES`, and `TLS_CURVE_PREFERENCES` environment variables into the Results API deployment
+
+## TLS Configuration Flow
+
+```
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ INITIALIZATION │
+├─────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ 1. TektonConfig Controller starts │
+│ └─► setupAPIServerTLSWatch() creates shared informer for APIServer │
+│ └─► Stores lister in occommon.SetSharedAPIServerLister() │
+│ └─► Registers event handler to enqueue TektonConfig on changes │
+│ │
+└─────────────────────────────────────────────────────────────────────────────┘
+
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ RECONCILIATION │
+├─────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ 2. TektonResult reconciliation triggered │
+│ │ │
+│ ▼ │
+│ 3. Extension.PreReconcile(ctx) called │
+│ │ │
+│ ├─► resolveTLSConfig(ctx) │
+│ │ ├─► Check TektonConfig.Spec.Platforms.OpenShift.EnableCentralTLSConfig│
+│ │ │ └─► If false, return nil (no central TLS) │
+│ │ │ │
+│ │ └─► occommon.GetTLSEnvVarsFromAPIServer(ctx) │
+│ │ ├─► Read from shared APIServer lister (no API call) │
+│ │ ├─► Use library-go's ObserveTLSSecurityProfile() │
+│ │ └─► Return TLSEnvVars{MinVersion, CipherSuites, CurvePreferences}│
+│ │ │
+│ └─► Store result in oe.resolvedTLSConfig │
+│ └─► Log: "Injecting central TLS config: MinVersion=..." │
+│ │
+│ 4. Hash computation includes Extension.GetPlatformData() │
+│ └─► Returns fingerprint: "MinVersion:CipherSuites:CurvePreferences" │
+│ └─► Change in TLS config → different hash → installer set update │
+│ │
+│ 5. Extension.Transformers() called │
+│ └─► If resolvedTLSConfig != nil: │
+│ └─► Add injectTLSConfig() transformer │
+│ │
+│ 6. Manifests transformed │
+│ └─► injectTLSConfig() adds env vars to Results API deployment: │
+│ ├─► TLS_MIN_VERSION │
+│ ├─► TLS_CIPHER_SUITES │
+│ └─► TLS_CURVE_PREFERENCES │
+│ │
+└─────────────────────────────────────────────────────────────────────────────┘
+
+┌─────────────────────────────────────────────────────────────────────────────┐
+│ AUTOMATIC UPDATES │
+├─────────────────────────────────────────────────────────────────────────────┤
+│ │
+│ 7. When APIServer TLS profile changes: │
+│ └─► Informer event handler triggers │
+│ └─► Enqueues TektonConfig for reconciliation │
+│ └─► TektonResult reconciled with new TLS config │
+│ └─► New hash computed → InstallerSet updated │
+│ └─► Deployment updated with new env vars │
+│ │
+└─────────────────────────────────────────────────────────────────────────────┘
+```
diff --git a/go.mod b/go.mod
index 3a4c722a9f..71f30176a5 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
github.com/openshift/api v0.0.0-20240521185306-0314f31e7774
github.com/openshift/apiserver-library-go v0.0.0-20230816171015-6bfafa975bfb
github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d
+ github.com/openshift/library-go v0.0.0-20230503173034-95ca3c14e50a
github.com/sigstore/cosign/v2 v2.6.2
github.com/spf13/cobra v1.10.2
github.com/spf13/viper v1.21.0
@@ -141,10 +142,12 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/emicklei/proto v1.14.2 // indirect
+ github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-chi/chi/v5 v5.2.4 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
@@ -199,6 +202,7 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/henvic/httpretty v0.0.6 // indirect
+ github.com/imdario/mergo v0.3.11 // indirect
github.com/in-toto/attestation v1.1.2 // indirect
github.com/in-toto/in-toto-golang v0.9.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -250,6 +254,7 @@ require (
github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 // indirect
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/robfig/cron v1.2.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/sassoftware/relic v7.2.1+incompatible // indirect
@@ -316,13 +321,17 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/apiserver v0.34.1 // indirect
+ k8s.io/component-base v0.34.1 // indirect
k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d // indirect
k8s.io/klog/v2 v2.130.1 // indirect
+ k8s.io/kube-aggregator v0.34.1 // indirect
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect
knative.dev/hack v0.0.0-20250331013814-c577ed9f7775 // indirect
sigs.k8s.io/controller-runtime v0.22.4 // indirect
sigs.k8s.io/gateway-api v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/kube-storage-version-migrator v0.0.4 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/release-utils v0.12.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
diff --git a/go.sum b/go.sum
index 8179b41738..747e28e333 100644
--- a/go.sum
+++ b/go.sum
@@ -1365,6 +1365,7 @@ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLB
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
@@ -1405,6 +1406,8 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
@@ -1485,6 +1488,7 @@ github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
@@ -1538,6 +1542,7 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -1622,6 +1627,7 @@ github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido6
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
@@ -1630,20 +1636,29 @@ github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod
github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -1681,12 +1696,15 @@ github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVf
github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
@@ -1716,6 +1734,7 @@ github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87K
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -1750,6 +1769,7 @@ github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi/v5 v5.2.4 h1:WtFKPHwlywe8Srng8j2BhOD9312j9cGUxG1SP4V2cR4=
github.com/go-chi/chi/v5 v5.2.4/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0=
@@ -1797,10 +1817,14 @@ github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rB
github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
github.com/go-openapi/errors v0.22.6 h1:eDxcf89O8odEnohIXwEjY1IB4ph5vmbUsBMsFNwXWPo=
github.com/go-openapi/errors v0.22.6/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
@@ -1809,10 +1833,13 @@ github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJ
github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc=
github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs=
github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
@@ -1872,6 +1899,7 @@ github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlnd
github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
github.com/gobuffalo/envy v1.10.1 h1:ppDLoXv2feQ5nus4IcgtyMdHQkKng2lhJCIm33cblM0=
github.com/gobuffalo/envy v1.10.1/go.mod h1:AWx4++KnNOW3JOeEvhSaq+mvgAvnMYOY1XSIin4Mago=
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -1890,6 +1918,7 @@ github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@@ -1907,6 +1936,7 @@ github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -2031,6 +2061,7 @@ github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0
github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -2067,19 +2098,25 @@ github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7
github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
@@ -2152,6 +2189,9 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
@@ -2186,6 +2226,8 @@ github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -2202,6 +2244,7 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
@@ -2225,6 +2268,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@@ -2258,7 +2302,11 @@ github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuz
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/manifestival/client-go-client v0.6.0 h1:7vV7th1Y48LlJ3UWcR57JYD/h7Oo0Dm0ffmwj8xoEVE=
github.com/manifestival/client-go-client v0.6.0/go.mod h1:2x6VHJ9/2It3TknttgiDgrdhtgwNnCK1JsOh/+3Jld0=
@@ -2275,6 +2323,7 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
@@ -2283,6 +2332,7 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
@@ -2349,7 +2399,10 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oleiade/reflections v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo=
github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -2376,6 +2429,7 @@ github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VF
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
@@ -2415,6 +2469,8 @@ github.com/openshift/apiserver-library-go v0.0.0-20230816171015-6bfafa975bfb h1:
github.com/openshift/apiserver-library-go v0.0.0-20230816171015-6bfafa975bfb/go.mod h1:GLyuhZGQzvcbqJI6OcM6lugGFbveCjQu0H+yHyr9X1w=
github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d h1:Dq21KMlDTVy2fCIyp0gsW+6ir6FwD3RjnCuza2/bIyM=
github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d/go.mod h1:jt2Q+6Iheyh6omSPkRMOC6Doad5My/FfBfJpATPD4g0=
+github.com/openshift/library-go v0.0.0-20230503173034-95ca3c14e50a h1:GWDlGsHQUo2QaXG8r4nCAbAMAYNN85HOMt+vZSLBOdQ=
+github.com/openshift/library-go v0.0.0-20230503173034-95ca3c14e50a/go.mod h1:PJVatR/oS/EaFciwylyAr9hORSqQHrC+5bXf4L0wsBY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
@@ -2444,6 +2500,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
@@ -2514,6 +2571,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qq
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -2524,6 +2583,7 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
@@ -2587,6 +2647,7 @@ github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrel
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
@@ -2596,17 +2657,21 @@ github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
@@ -2682,6 +2747,7 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHT
github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
@@ -2689,6 +2755,8 @@ github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ
github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI=
github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
@@ -2707,6 +2775,7 @@ github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
@@ -2735,7 +2804,9 @@ gitlab.com/gitlab-org/api/client-go v1.14.0 h1:0TAU8zwN4p6ZMUnXLUEkSRmUr+mN4B3JQ
gitlab.com/gitlab-org/api/client-go v1.14.0/go.mod h1:adtVJ4zSTEJ2fP5Pb1zF4Ox1OKFg0MH43yxpb0T0248=
go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
@@ -2843,6 +2914,7 @@ go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8
go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0=
go.step.sm/crypto v0.75.0 h1:UAHYD6q6ggYyzLlIKHv1MCUVjZIesXRZpGTlRC/HSHw=
go.step.sm/crypto v0.75.0/go.mod h1:wwQ57+ajmDype9mrI/2hRyrvJd7yja5xVgWYqpUN3PE=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
@@ -2875,12 +2947,15 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -3008,7 +3083,10 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -3160,6 +3238,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -3168,8 +3247,10 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -3337,6 +3418,7 @@ golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -3352,6 +3434,7 @@ golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -3363,10 +3446,12 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -3809,6 +3894,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -3899,6 +3985,7 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
@@ -3915,6 +4002,7 @@ gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -3928,11 +4016,13 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
@@ -3950,17 +4040,25 @@ k8s.io/apiextensions-apiserver v0.32.9 h1:tpT1dUgWqEsTyrdoGckyw8OBASW1JfU08tHGaY
k8s.io/apiextensions-apiserver v0.32.9/go.mod h1:FoCi4zCLK67LNCCssFa2Wr9q4Xbvjx7MW4tdze5tpoA=
k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I=
k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A=
k8s.io/apiserver v0.32.9/go.mod h1:MuuqNdvkneD4kcQc5mUZQCOQYzfKMba6P36bVW+wZtI=
+k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
+k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M=
k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic=
k8s.io/code-generator v0.32.4 h1:d4dm/43RD6xhPBX22JgJw9JUpwTKzVR6tAxJD7pz83o=
k8s.io/code-generator v0.32.4/go.mod h1:R0bKdIg1smtvsKvj9q7SxTeKq5X9ko6PuICCGt4yqxg=
+k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks=
k8s.io/component-base v0.27.7/go.mod h1:YGjlCVL1oeKvG3HSciyPHFh+LCjIEqsxz4BDR3cfHRs=
k8s.io/component-base v0.32.9/go.mod h1:AfJMbzLk8iyOyDPkv/HpAjYmdNGookl9O0Kva5Wu83U=
+k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
+k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d h1:qUrYOinhdAUL0xxhA4gPqogPBaS9nIq2l2kTb6pmeB0=
k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
@@ -3969,8 +4067,12 @@ k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.32.9/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
+k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk=
+k8s.io/kube-aggregator v0.34.1 h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU=
+k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ=
k8s.io/kube-openapi v0.0.0-20250627150254-e9823e99808e h1:UGI9rv1A2cV87NhXr4s+AUBxIuoo/SME/IyJ3b6KztE=
k8s.io/kube-openapi v0.0.0-20250627150254-e9823e99808e/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw=
+k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
@@ -4050,26 +4152,32 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
+sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE=
sigs.k8s.io/gateway-api v1.4.1 h1:NPxFutNkKNa8UfLd2CMlEuhIPMQgDQ6DXNKG9sHbJU8=
sigs.k8s.io/gateway-api v1.4.1/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc=
+sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/release-utils v0.12.3 h1:iNVJY81QfmMCmXxMg8IvvkkeQNk6ZWlLj+iPKSlKyVQ=
sigs.k8s.io/release-utils v0.12.3/go.mod h1:BvbNmm1BmM3cnEpBmNHWL3wOSziOdGlsYR8vCFq/Q0o=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/pkg/apis/operator/v1alpha1/openshift_platform.go b/pkg/apis/operator/v1alpha1/openshift_platform.go
index 52c770dc81..620efdf754 100644
--- a/pkg/apis/operator/v1alpha1/openshift_platform.go
+++ b/pkg/apis/operator/v1alpha1/openshift_platform.go
@@ -23,6 +23,16 @@ type OpenShift struct {
// SCC allows configuring security context constraints used by workloads
// +optional
SCC *SCC `json:"scc,omitempty"`
+ // EnableCentralTLSConfig enables TLS configuration inheritance from
+ // the cluster's APIServer TLS security profile. When enabled, TLS settings
+ // (minimum version, cipher suites, curve preferences) are automatically
+ // derived from the cluster-wide security policy and injected into Tekton
+ // component containers that support TLS configuration.
+ // If the APIServer does not have a TLS profile configured, user-specified
+ // TLS settings in component configurations will be used as a fallback.
+ // Default: false (opt-in)
+ // +optional
+ EnableCentralTLSConfig bool `json:"enableCentralTLSConfig,omitempty"`
}
type PipelinesAsCode struct {
diff --git a/pkg/reconciler/common/extensions.go b/pkg/reconciler/common/extensions.go
index d28871db4b..827cfd8689 100644
--- a/pkg/reconciler/common/extensions.go
+++ b/pkg/reconciler/common/extensions.go
@@ -29,6 +29,10 @@ type Extension interface {
PreReconcile(context.Context, v1alpha1.TektonComponent) error
PostReconcile(context.Context, v1alpha1.TektonComponent) error
Finalize(context.Context, v1alpha1.TektonComponent) error
+ // GetPlatformData returns platform-specific data to include in installer set hash.
+ // This enables triggering installer set updates when platform-specific config changes
+ // (e.g., TLS configuration from APIServer on OpenShift).
+ GetPlatformData() string
}
// ExtensionGenerator creates an Extension from a Context
@@ -53,3 +57,6 @@ func (nilExtension) PostReconcile(context.Context, v1alpha1.TektonComponent) err
func (nilExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+func (nilExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/common/extensions_test.go b/pkg/reconciler/common/extensions_test.go
index 5a86b1f66c..332f7f949a 100644
--- a/pkg/reconciler/common/extensions_test.go
+++ b/pkg/reconciler/common/extensions_test.go
@@ -43,6 +43,10 @@ func (t TestExtension) Finalize(context.Context, v1alpha1.TektonComponent) error
return nil
}
+func (t TestExtension) GetPlatformData() string {
+ return ""
+}
+
func TestExtensions(t *testing.T) {
tests := []struct {
name string
diff --git a/pkg/reconciler/kubernetes/tektonconfig/extension.go b/pkg/reconciler/kubernetes/tektonconfig/extension.go
index 0a7361fdb4..a677c43933 100644
--- a/pkg/reconciler/kubernetes/tektonconfig/extension.go
+++ b/pkg/reconciler/kubernetes/tektonconfig/extension.go
@@ -67,3 +67,7 @@ func (oe kubernetesExtension) Finalize(ctx context.Context, comp v1alpha1.Tekton
}
return nil
}
+
+func (oe kubernetesExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/kubernetes/tektonresult/installerset.go b/pkg/reconciler/kubernetes/tektonresult/installerset.go
index 893eea446b..46aab19bb5 100644
--- a/pkg/reconciler/kubernetes/tektonresult/installerset.go
+++ b/pkg/reconciler/kubernetes/tektonresult/installerset.go
@@ -33,11 +33,18 @@ func (r *Reconciler) createInstallerSet(ctx context.Context, tr *v1alpha1.Tekton
return nil, err
}
- // compute the hash of tektonresult spec and store as an annotation
- // in further reconciliation we compute hash of td spec and check with
- // annotation, if they are same then we skip updating the object
+ // compute the hash of tektonresult spec (including platform-specific data)
+ // and store as an annotation. In further reconciliation we compute hash
+ // and check with annotation, if they are same then we skip updating the object
// otherwise we update the manifest
- specHash, err := hash.Compute(tr.Spec)
+ hashInput := struct {
+ Spec v1alpha1.TektonResultSpec
+ ExtraData string
+ }{
+ Spec: tr.Spec,
+ ExtraData: r.extension.GetPlatformData(),
+ }
+ specHash, err := hash.Compute(hashInput)
if err != nil {
return nil, err
}
diff --git a/pkg/reconciler/kubernetes/tektonresult/tektonresult.go b/pkg/reconciler/kubernetes/tektonresult/tektonresult.go
index aa82671e6c..104f391cf6 100644
--- a/pkg/reconciler/kubernetes/tektonresult/tektonresult.go
+++ b/pkg/reconciler/kubernetes/tektonresult/tektonresult.go
@@ -322,8 +322,15 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, tr *v1alpha1.TektonResul
// of TektonResult is changed by checking hash stored as annotation on
// TektonInstallerSet with computing new hash of TektonResult Spec
logger.Debug("Checking for spec changes in TektonResult")
- // Hash of TektonResult Spec
- expectedSpecHash, err := hash.Compute(tr.Spec)
+ // Hash of TektonResult Spec including platform-specific data (e.g., TLS config)
+ hashInput := struct {
+ Spec v1alpha1.TektonResultSpec
+ ExtraData string
+ }{
+ Spec: tr.Spec,
+ ExtraData: r.extension.GetPlatformData(),
+ }
+ expectedSpecHash, err := hash.Compute(hashInput)
if err != nil {
logger.Errorw("Failed to compute spec hash", "error", err)
return err
diff --git a/pkg/reconciler/openshift/common/tlsprofile.go b/pkg/reconciler/openshift/common/tlsprofile.go
new file mode 100644
index 0000000000..20b558076b
--- /dev/null
+++ b/pkg/reconciler/openshift/common/tlsprofile.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2026 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ mf "github.com/manifestival/manifestival"
+ configv1 "github.com/openshift/api/config/v1"
+ openshiftconfigclient "github.com/openshift/client-go/config/clientset/versioned"
+ configv1listers "github.com/openshift/client-go/config/listers/config/v1"
+ "github.com/openshift/library-go/pkg/operator/configobserver/apiserver"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resourcesynccontroller"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/cache"
+ "knative.dev/pkg/logging"
+
+ "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
+)
+
+const (
+ // TLS environment variable names used by Tekton components
+ TLSMinVersionEnvVar = "TLS_MIN_VERSION"
+ TLSCipherSuitesEnvVar = "TLS_CIPHER_SUITES"
+ TLSCurvePreferencesEnvVar = "TLS_CURVE_PREFERENCES"
+)
+
+// TLSEnvVars holds TLS configuration as environment variable values
+type TLSEnvVars struct {
+ MinVersion string
+ CipherSuites string
+ CurvePreferences string
+}
+
+// APIServerListers implements the configobserver.Listers interface for accessing APIServer resources.
+// This adapter enables using library-go's ObserveTLSSecurityProfile function with our informer setup.
+type APIServerListers struct {
+ lister configv1listers.APIServerLister
+}
+
+// APIServerLister returns the APIServer lister
+func (a *APIServerListers) APIServerLister() configv1listers.APIServerLister {
+ return a.lister
+}
+
+// ResourceSyncer returns nil; it satisfies the Listers interface but resource syncing is not needed here.
+func (a *APIServerListers) ResourceSyncer() resourcesynccontroller.ResourceSyncer {
+ return nil
+}
+
+// PreRunHasSynced returns nil (no pre-run sync needed)
+func (a *APIServerListers) PreRunHasSynced() []cache.InformerSynced {
+ return nil
+}
+
+// The variables below hold the singleton APIServer lister and OpenShift config client.
+// They are initialized once by the TektonConfig controller and shared across all components.
+var (
+ sharedAPIServerLister configv1listers.APIServerLister
+ sharedConfigClient openshiftconfigclient.Interface
+ sharedListerMu sync.RWMutex
+)
+
+// SetSharedAPIServerLister sets the shared APIServer lister and client.
+// This should be called once during TektonConfig controller initialization.
+func SetSharedAPIServerLister(lister configv1listers.APIServerLister, client openshiftconfigclient.Interface) {
+ sharedListerMu.Lock()
+ defer sharedListerMu.Unlock()
+ sharedAPIServerLister = lister
+ sharedConfigClient = client
+}
+
+// TLSProfileConfig holds the raw TLS profile data as extracted from the APIServer resource.
+// Values are in library-go / OpenShift API format (e.g. "VersionTLS12", IANA cipher names).
+type TLSProfileConfig struct {
+ MinTLSVersion string
+ CipherSuites []string
+ CurvePreferences []string // Not yet populated; will be set once openshift/api#2583 is merged
+}
+
+// GetTLSProfileFromAPIServer fetches the raw TLS security profile from the OpenShift APIServer
+// resource. Returns (nil, nil) if no TLS profile is configured or the shared lister is not initialized.
+func GetTLSProfileFromAPIServer(ctx context.Context) (*TLSProfileConfig, error) {
+ logger := logging.FromContext(ctx)
+
+ sharedListerMu.RLock()
+ lister := sharedAPIServerLister
+ client := sharedConfigClient
+ sharedListerMu.RUnlock()
+
+ if lister == nil {
+ logger.Debug("Shared APIServer lister not initialized, TLS config not available")
+ return nil, nil
+ }
+
+ listers := &APIServerListers{
+ lister: lister,
+ }
+
+ // Use library-go's ObserveTLSSecurityProfile to extract TLS config.
+ // Note: ObserveTLSSecurityProfile requires:
+ // - non-nil recorder: it calls recorder.Eventf() to log changes
+ // - non-nil existingConfig: it reads from it via unstructured.NestedString()
+ // TODO: Once library-go is updated to a newer version (with TLS 1.3 cipher support),
+ // the supplementTLS13Ciphers workaround below can be removed.
+ existingConfig := map[string]interface{}{}
+ recorder := events.NewLoggingEventRecorder("tekton-operator")
+ observedConfig, errs := apiserver.ObserveTLSSecurityProfile(listers, recorder, existingConfig)
+ if len(errs) > 0 {
+ return nil, errors.Join(errs...)
+ }
+
+ servingInfo, ok := observedConfig["servingInfo"].(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+
+ minVersion, _ := servingInfo["minTLSVersion"].(string)
+
+ var cipherSuites []string
+ if ciphers, ok := servingInfo["cipherSuites"].([]interface{}); ok {
+ for _, c := range ciphers {
+ if cs, ok := c.(string); ok {
+ cipherSuites = append(cipherSuites, cs)
+ }
+ }
+ }
+
+ if minVersion == "" && len(cipherSuites) == 0 {
+ return nil, nil
+ }
+
+ // Supplement TLS 1.3 ciphers if needed
+ // TODO: Remove this once library-go is updated with proper TLS 1.3 cipher mapping
+ if client != nil {
+ apiServer, err := lister.Get("cluster")
+ if err == nil && apiServer.Spec.TLSSecurityProfile != nil {
+ cipherSuites = supplementTLS13Ciphers(apiServer.Spec.TLSSecurityProfile, cipherSuites)
+ }
+ }
+
+ return &TLSProfileConfig{
+ MinTLSVersion: minVersion,
+ CipherSuites: cipherSuites,
+ CurvePreferences: nil,
+ }, nil
+}
+
+// TLSEnvVarsFromProfile validates and converts a raw TLSProfileConfig to TLSEnvVars
+// suitable for injection into component deployments.
+func TLSEnvVarsFromProfile(cfg *TLSProfileConfig) (*TLSEnvVars, error) {
+ if cfg == nil {
+ return nil, nil
+ }
+
+ envMinVersion, err := convertTLSVersionToEnvFormat(cfg.MinTLSVersion)
+ if err != nil {
+ return nil, fmt.Errorf("invalid TLS configuration: %w", err)
+ }
+
+ return &TLSEnvVars{
+ MinVersion: envMinVersion,
+ CipherSuites: strings.Join(cfg.CipherSuites, ","),
+ CurvePreferences: strings.Join(cfg.CurvePreferences, ","),
+ }, nil
+}
+
+// TektonConfigLister abstracts access to TektonConfig resources.
+type TektonConfigLister interface {
+ Get(name string) (*v1alpha1.TektonConfig, error)
+}
+
+// ResolveCentralTLSToEnvVars checks whether central TLS config is enabled in TektonConfig,
+// fetches the raw profile from the shared APIServer lister, and converts it to env vars.
+// Returns (nil, nil) if central TLS is disabled or no TLS config is available.
+func ResolveCentralTLSToEnvVars(ctx context.Context, lister TektonConfigLister) (*TLSEnvVars, error) {
+ tc, err := lister.Get(v1alpha1.ConfigResourceName)
+ if err != nil {
+ return nil, err
+ }
+
+ if !tc.Spec.Platforms.OpenShift.EnableCentralTLSConfig {
+ return nil, nil
+ }
+
+ profile, err := GetTLSProfileFromAPIServer(ctx)
+ if err != nil || profile == nil {
+ return nil, err
+ }
+ return TLSEnvVarsFromProfile(profile)
+}
+
+// convertTLSVersionToEnvFormat converts library-go TLS version format (VersionTLSxx) to
+// the format expected by Go's crypto/tls (1.x)
+func convertTLSVersionToEnvFormat(version string) (string, error) {
+ switch version {
+ case "VersionTLS10":
+ return "1.0", nil
+ case "VersionTLS11":
+ return "1.1", nil
+ case "VersionTLS12":
+ return "1.2", nil
+ case "VersionTLS13":
+ return "1.3", nil
+ default:
+ return "", fmt.Errorf("unknown TLS version: %s", version)
+ }
+}
+
+// InjectTLSEnvVars returns a transformer that injects TLS environment variables into
+// the specified containers of a Deployment or StatefulSet matched by name.
+func InjectTLSEnvVars(tlsEnvVars *TLSEnvVars, kind string, resourceName string, containerNames []string) mf.Transformer {
+ return func(u *unstructured.Unstructured) error {
+ if u.GetKind() != kind || u.GetName() != resourceName {
+ return nil
+ }
+
+ envVars := buildTLSEnvVarList(tlsEnvVars)
+ if len(envVars) == 0 {
+ return nil
+ }
+
+ switch kind {
+ case "Deployment":
+ return injectEnvVarsIntoDeployment(u, containerNames, envVars)
+ case "StatefulSet":
+ return injectEnvVarsIntoStatefulSet(u, containerNames, envVars)
+ }
+ return nil
+ }
+}
+
+func buildTLSEnvVarList(tlsEnvVars *TLSEnvVars) []corev1.EnvVar {
+ var envVars []corev1.EnvVar
+ if tlsEnvVars.MinVersion != "" {
+ envVars = append(envVars, corev1.EnvVar{Name: TLSMinVersionEnvVar, Value: tlsEnvVars.MinVersion})
+ }
+ if tlsEnvVars.CipherSuites != "" {
+ envVars = append(envVars, corev1.EnvVar{Name: TLSCipherSuitesEnvVar, Value: tlsEnvVars.CipherSuites})
+ }
+ if tlsEnvVars.CurvePreferences != "" {
+ envVars = append(envVars, corev1.EnvVar{Name: TLSCurvePreferencesEnvVar, Value: tlsEnvVars.CurvePreferences})
+ }
+ return envVars
+}
+
+func injectEnvVarsIntoDeployment(u *unstructured.Unstructured, containerNames []string, envVars []corev1.EnvVar) error {
+ d := &appsv1.Deployment{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, d); err != nil {
+ return err
+ }
+ mergeEnvVarsIntoContainers(d.Spec.Template.Spec.Containers, containerNames, envVars)
+ uObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(d)
+ if err != nil {
+ return err
+ }
+ u.SetUnstructuredContent(uObj)
+ return nil
+}
+
+func injectEnvVarsIntoStatefulSet(u *unstructured.Unstructured, containerNames []string, envVars []corev1.EnvVar) error {
+ sts := &appsv1.StatefulSet{}
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, sts); err != nil {
+ return err
+ }
+ mergeEnvVarsIntoContainers(sts.Spec.Template.Spec.Containers, containerNames, envVars)
+ uObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(sts)
+ if err != nil {
+ return err
+ }
+ u.SetUnstructuredContent(uObj)
+ return nil
+}
+
+func mergeEnvVarsIntoContainers(containers []corev1.Container, names []string, envVars []corev1.EnvVar) {
+ nameSet := make(map[string]bool, len(names))
+ for _, n := range names {
+ nameSet[n] = true
+ }
+ for i, container := range containers {
+ if !nameSet[container.Name] {
+ continue
+ }
+ existing := container.Env
+ for _, newEnv := range envVars {
+ found := false
+ for j, e := range existing {
+ if e.Name == newEnv.Name {
+ existing[j] = newEnv
+ found = true
+ break
+ }
+ }
+ if !found {
+ existing = append(existing, newEnv)
+ }
+ }
+ containers[i].Env = existing
+ }
+}
+
+// supplementTLS13Ciphers adds TLS 1.3 ciphers that the older library-go version doesn't map.
+// TLS 1.3 ciphers are mandatory per RFC 8446 and are always enabled when TLS 1.3 is used,
+// but we include them explicitly for completeness.
+// TODO: Remove this function once library-go is updated to a version that properly maps TLS 1.3 ciphers.
+func supplementTLS13Ciphers(profile *configv1.TLSSecurityProfile, observedCiphers []string) []string {
+ if profile == nil {
+ return observedCiphers
+ }
+
+ // Get the profile spec that defines the configured ciphers
+ var profileSpec *configv1.TLSProfileSpec
+ switch profile.Type {
+ case configv1.TLSProfileCustomType:
+ if profile.Custom != nil {
+ profileSpec = &profile.Custom.TLSProfileSpec
+ }
+ case configv1.TLSProfileModernType:
+ profileSpec = configv1.TLSProfiles[configv1.TLSProfileModernType]
+ case configv1.TLSProfileIntermediateType:
+ profileSpec = configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
+ case configv1.TLSProfileOldType:
+ profileSpec = configv1.TLSProfiles[configv1.TLSProfileOldType]
+ }
+
+ if profileSpec == nil {
+ return observedCiphers
+ }
+
+ // Build a set of already observed ciphers for quick lookup
+ observedSet := make(map[string]bool)
+ for _, c := range observedCiphers {
+ observedSet[c] = true
+ }
+
+ // TLS 1.3 cipher suite names (IANA names)
+ tls13Ciphers := map[string]bool{
+ "TLS_AES_128_GCM_SHA256": true,
+ "TLS_AES_256_GCM_SHA384": true,
+ "TLS_CHACHA20_POLY1305_SHA256": true,
+ }
+
+ // Check configured ciphers for TLS 1.3 ciphers that library-go might have missed
+ result := observedCiphers
+ for _, cipher := range profileSpec.Ciphers {
+ // If it's a TLS 1.3 cipher and not already in observed list, add it
+ if tls13Ciphers[cipher] && !observedSet[cipher] {
+ result = append(result, cipher)
+ }
+ }
+
+ return result
+}
diff --git a/pkg/reconciler/openshift/common/tlsprofile_test.go b/pkg/reconciler/openshift/common/tlsprofile_test.go
new file mode 100644
index 0000000000..24bd5ef012
--- /dev/null
+++ b/pkg/reconciler/openshift/common/tlsprofile_test.go
@@ -0,0 +1,226 @@
+/*
+Copyright 2026 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "testing"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+func TestConvertTLSVersionToEnvFormat(t *testing.T) {
+ tests := []struct {
+ name string
+ version string
+ expected string
+ expectErr bool
+ }{
+ {"TLS 1.0", "VersionTLS10", "1.0", false},
+ {"TLS 1.1", "VersionTLS11", "1.1", false},
+ {"TLS 1.2", "VersionTLS12", "1.2", false},
+ {"TLS 1.3", "VersionTLS13", "1.3", false},
+ {"Unknown version", "UnknownVersion", "", true},
+ {"Empty string", "", "", true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := convertTLSVersionToEnvFormat(tt.version)
+ if tt.expectErr && err == nil {
+ t.Errorf("convertTLSVersionToEnvFormat(%s) expected error, got nil", tt.version)
+ }
+ if !tt.expectErr && err != nil {
+ t.Errorf("convertTLSVersionToEnvFormat(%s) unexpected error: %v", tt.version, err)
+ }
+ if result != tt.expected {
+ t.Errorf("convertTLSVersionToEnvFormat(%s) = %s, want %s", tt.version, result, tt.expected)
+ }
+ })
+ }
+}
+
+func TestSupplementTLS13Ciphers(t *testing.T) {
+ tests := []struct {
+ name string
+ profile *configv1.TLSSecurityProfile
+ observedCiphers []string
+ expectContains []string
+ }{
+ {
+ name: "Nil profile returns observed ciphers unchanged",
+ profile: nil,
+ observedCiphers: []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
+ expectContains: []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
+ },
+ {
+ name: "Custom profile with TLS 1.3 ciphers supplements missing ones",
+ profile: &configv1.TLSSecurityProfile{
+ Type: configv1.TLSProfileCustomType,
+ Custom: &configv1.CustomTLSProfile{
+ TLSProfileSpec: configv1.TLSProfileSpec{
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ },
+ },
+ },
+ },
+ observedCiphers: []string{},
+ expectContains: []string{"TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384"},
+ },
+ {
+ name: "Mixed ciphers - TLS 1.3 supplemented, TLS 1.2 kept",
+ profile: &configv1.TLSSecurityProfile{
+ Type: configv1.TLSProfileCustomType,
+ Custom: &configv1.CustomTLSProfile{
+ TLSProfileSpec: configv1.TLSProfileSpec{
+ Ciphers: []string{
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ "TLS_AES_128_GCM_SHA256",
+ },
+ },
+ },
+ },
+ observedCiphers: []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
+ expectContains: []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256"},
+ },
+ {
+ name: "Already present TLS 1.3 ciphers not duplicated",
+ profile: &configv1.TLSSecurityProfile{
+ Type: configv1.TLSProfileCustomType,
+ Custom: &configv1.CustomTLSProfile{
+ TLSProfileSpec: configv1.TLSProfileSpec{
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ },
+ },
+ },
+ },
+ observedCiphers: []string{"TLS_AES_128_GCM_SHA256"},
+ expectContains: []string{"TLS_AES_128_GCM_SHA256"},
+ },
+ {
+ name: "Modern profile type uses predefined profile spec",
+ profile: &configv1.TLSSecurityProfile{
+ Type: configv1.TLSProfileModernType,
+ },
+ observedCiphers: []string{},
+ // Modern profile includes TLS 1.3 ciphers in predefined spec
+ expectContains: []string{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := supplementTLS13Ciphers(tt.profile, tt.observedCiphers)
+
+ for _, expected := range tt.expectContains {
+ found := false
+ for _, cipher := range result {
+ if cipher == expected {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Expected cipher %s not found in result %v", expected, result)
+ }
+ }
+ })
+ }
+}
+
+func TestTLSEnvVarsFromProfile(t *testing.T) {
+ t.Run("nil config returns nil", func(t *testing.T) {
+ result, err := TLSEnvVarsFromProfile(nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if result != nil {
+ t.Errorf("Expected nil, got %v", result)
+ }
+ })
+
+ t.Run("valid TLS 1.2 profile", func(t *testing.T) {
+ cfg := &TLSProfileConfig{
+ MinTLSVersion: "VersionTLS12",
+ CipherSuites: []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256"},
+ }
+ result, err := TLSEnvVarsFromProfile(cfg)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if result.MinVersion != "1.2" {
+ t.Errorf("MinVersion = %s, want 1.2", result.MinVersion)
+ }
+ if result.CipherSuites != "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_AES_128_GCM_SHA256" {
+ t.Errorf("CipherSuites = %s, want TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_AES_128_GCM_SHA256", result.CipherSuites)
+ }
+ })
+
+ t.Run("valid TLS 1.3 profile", func(t *testing.T) {
+ cfg := &TLSProfileConfig{
+ MinTLSVersion: "VersionTLS13",
+ CipherSuites: []string{"TLS_AES_128_GCM_SHA256"},
+ }
+ result, err := TLSEnvVarsFromProfile(cfg)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if result.MinVersion != "1.3" {
+ t.Errorf("MinVersion = %s, want 1.3", result.MinVersion)
+ }
+ })
+
+ t.Run("invalid TLS version rejects entire config", func(t *testing.T) {
+ cfg := &TLSProfileConfig{
+ MinTLSVersion: "InvalidVersion",
+ CipherSuites: []string{"TLS_AES_128_GCM_SHA256"},
+ }
+ result, err := TLSEnvVarsFromProfile(cfg)
+ if err == nil {
+ t.Error("Expected error for invalid TLS version, got nil")
+ }
+ if result != nil {
+ t.Errorf("Expected nil result on error, got %v", result)
+ }
+ })
+
+ t.Run("empty cipher suites", func(t *testing.T) {
+ cfg := &TLSProfileConfig{
+ MinTLSVersion: "VersionTLS12",
+ CipherSuites: nil,
+ }
+ result, err := TLSEnvVarsFromProfile(cfg)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if result.CipherSuites != "" {
+ t.Errorf("CipherSuites = %s, want empty", result.CipherSuites)
+ }
+ })
+}
+
+func TestAPIServerListersInterface(t *testing.T) {
+ // Verify that APIServerListers implements the interface methods correctly
+ listers := &APIServerListers{}
+
+ // These should not panic
+ _ = listers.ResourceSyncer()
+ _ = listers.PreRunHasSynced()
+ _ = listers.APIServerLister()
+}
diff --git a/pkg/reconciler/openshift/manualapprovalgate/extension.go b/pkg/reconciler/openshift/manualapprovalgate/extension.go
index 5342bc0693..e76a60d3a4 100644
--- a/pkg/reconciler/openshift/manualapprovalgate/extension.go
+++ b/pkg/reconciler/openshift/manualapprovalgate/extension.go
@@ -52,3 +52,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/extension.go b/pkg/reconciler/openshift/openshiftpipelinesascode/extension.go
index 1640b5409a..975e83c66e 100644
--- a/pkg/reconciler/openshift/openshiftpipelinesascode/extension.go
+++ b/pkg/reconciler/openshift/openshiftpipelinesascode/extension.go
@@ -111,6 +111,10 @@ func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent)
return nil
}
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
+
func extFilterAndTransform() client.FilterAndTransform {
return func(ctx context.Context, manifest *mf.Manifest, comp v1alpha1.TektonComponent) (*mf.Manifest, error) {
prTemplates, err := manifest.Transform(mf.InjectNamespace(openshiftNS))
diff --git a/pkg/reconciler/openshift/syncerservice/extension.go b/pkg/reconciler/openshift/syncerservice/extension.go
index a8e3830c39..b5ec0c4eb7 100644
--- a/pkg/reconciler/openshift/syncerservice/extension.go
+++ b/pkg/reconciler/openshift/syncerservice/extension.go
@@ -50,3 +50,7 @@ func (oe openshiftExtension) PostReconcile(ctx context.Context, tc v1alpha1.Tekt
func (oe openshiftExtension) Finalize(ctx context.Context, tc v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/tektonchain/extension.go b/pkg/reconciler/openshift/tektonchain/extension.go
index 48efbd762e..11e8bd15fb 100644
--- a/pkg/reconciler/openshift/tektonchain/extension.go
+++ b/pkg/reconciler/openshift/tektonchain/extension.go
@@ -61,3 +61,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/tektonconfig/controller.go b/pkg/reconciler/openshift/tektonconfig/controller.go
index efb5d1e56b..d44a6998d6 100644
--- a/pkg/reconciler/openshift/tektonconfig/controller.go
+++ b/pkg/reconciler/openshift/tektonconfig/controller.go
@@ -18,14 +18,23 @@ package tektonconfig
import (
"context"
+ "os"
+ "time"
+ configv1 "github.com/openshift/api/config/v1"
+ openshiftconfigclient "github.com/openshift/client-go/config/clientset/versioned"
+ configinformers "github.com/openshift/client-go/config/informers/externalversions"
"github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
openshiftpipelinesascodeinformer "github.com/tektoncd/operator/pkg/client/injection/informers/operator/v1alpha1/openshiftpipelinesascode"
tektonAddoninformer "github.com/tektoncd/operator/pkg/client/injection/informers/operator/v1alpha1/tektonaddon"
+ occommon "github.com/tektoncd/operator/pkg/reconciler/openshift/common"
"github.com/tektoncd/operator/pkg/reconciler/shared/tektonconfig"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
+ "knative.dev/pkg/injection"
"knative.dev/pkg/logging"
)
@@ -46,5 +55,138 @@ func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl
}); err != nil {
logger.Panicf("Couldn't register OpenShiftPipelinesAsCode informer event handler: %w", err)
}
+
+ // Setup APIServer TLS profile watcher
+ // When the cluster's TLS security profile changes, enqueue TektonConfig for reconciliation
+ const skipAPIServerWatch = "SKIP_APISERVER_TLS_WATCH"
+ if err := setupAPIServerTLSWatch(ctx, ctrl); err != nil {
+ // On OpenShift clusters the APIServer resource should always exist.
+ // This env var is an escape hatch for edge cases and must be explicitly enabled.
+ if os.Getenv(skipAPIServerWatch) == "true" {
+ logger.Warnf("APIServer TLS profile watch not enabled: %v", err)
+ } else {
+ logger.Panicf("Couldn't setup APIServer TLS profile watch: %v", err)
+ }
+ }
+
return ctrl
}
+
+// setupAPIServerTLSWatch sets up a watch on the OpenShift APIServer resource
+// to monitor TLS security profile changes. When changes are detected, it enqueues
+// TektonConfig for reconciliation so TLS config can be propagated to components.
+func setupAPIServerTLSWatch(ctx context.Context, impl *controller.Impl) error {
+ logger := logging.FromContext(ctx)
+ restConfig := injection.GetConfig(ctx)
+
+ // Create OpenShift config client
+ configClient, err := openshiftconfigclient.NewForConfig(restConfig)
+ if err != nil {
+ return err
+ }
+
+ // Check if we can access the APIServer resource
+ _, err = configClient.ConfigV1().APIServers().Get(ctx, "cluster", metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+
+ // Create a shared informer factory for OpenShift config resources.
+ // 30 minute resync is sufficient since the APIServer resource rarely changes
+ // and the watch mechanism handles real-time updates.
+ configInformerFactory := configinformers.NewSharedInformerFactory(configClient, 30*time.Minute)
+
+ // Get the APIServer informer
+ apiServerInformer := configInformerFactory.Config().V1().APIServers()
+
+ // Add event handler to watch for APIServer changes
+ if _, err := apiServerInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ oldAPIServer, ok := oldObj.(*configv1.APIServer)
+ if !ok {
+ return
+ }
+ newAPIServer, ok := newObj.(*configv1.APIServer)
+ if !ok {
+ return
+ }
+
+ // Check if TLS security profile actually changed
+ if !tlsProfileChanged(oldAPIServer, newAPIServer) {
+ return
+ }
+
+ logger.Info("APIServer TLS security profile changed, triggering TektonConfig reconciliation")
+ impl.EnqueueKey(types.NamespacedName{Name: v1alpha1.ConfigResourceName})
+ },
+ }); err != nil {
+ return err
+ }
+
+ // Start the informer factory
+ configInformerFactory.Start(ctx.Done())
+
+ // Wait for caches to sync; on failure we log a warning and continue with a possibly stale lister.
+ if !cache.WaitForCacheSync(ctx.Done(), apiServerInformer.Informer().HasSynced) {
+ logger.Warn("Failed to sync APIServer informer cache")
+ }
+
+ // Share the lister with other components so they don't need to create their own informers
+ occommon.SetSharedAPIServerLister(apiServerInformer.Lister(), configClient)
+
+ return nil
+}
+
+// tlsProfileChanged checks if the TLS security profile has changed between two APIServer resources
+func tlsProfileChanged(old, new *configv1.APIServer) bool {
+ oldProfile := old.Spec.TLSSecurityProfile
+ newProfile := new.Spec.TLSSecurityProfile
+
+ // Both nil - no change
+ if oldProfile == nil && newProfile == nil {
+ return false
+ }
+
+ // One nil, one not - changed
+ if (oldProfile == nil) != (newProfile == nil) {
+ return true
+ }
+
+ // Different types - changed
+ if oldProfile.Type != newProfile.Type {
+ return true
+ }
+
+ // For custom profiles, check the actual settings
+ if oldProfile.Type == configv1.TLSProfileCustomType {
+ return !customProfilesEqual(oldProfile.Custom, newProfile.Custom)
+ }
+
+ // For predefined profiles (Old, Intermediate, Modern), equal types imply identical settings
+ return false
+}
+
+// customProfilesEqual checks if two custom TLS profiles are equal
+func customProfilesEqual(old, new *configv1.CustomTLSProfile) bool {
+ if old == nil && new == nil {
+ return true
+ }
+ if (old == nil) != (new == nil) {
+ return false
+ }
+
+ if old.MinTLSVersion != new.MinTLSVersion {
+ return false
+ }
+
+ if len(old.Ciphers) != len(new.Ciphers) {
+ return false
+ }
+ for i := range old.Ciphers {
+ if old.Ciphers[i] != new.Ciphers[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/pkg/reconciler/openshift/tektonconfig/extension.go b/pkg/reconciler/openshift/tektonconfig/extension.go
index 000c21116c..229bd9d508 100644
--- a/pkg/reconciler/openshift/tektonconfig/extension.go
+++ b/pkg/reconciler/openshift/tektonconfig/extension.go
@@ -199,6 +199,10 @@ func (oe openshiftExtension) PostReconcile(ctx context.Context, comp v1alpha1.Te
return oe.consolePluginReconciler.reconcile(ctx, configInstance)
}
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
+
func (oe openshiftExtension) Finalize(ctx context.Context, comp v1alpha1.TektonComponent) error {
configInstance := comp.(*v1alpha1.TektonConfig)
if configInstance.Spec.Profile == v1alpha1.ProfileAll {
diff --git a/pkg/reconciler/openshift/tektonhub/extension.go b/pkg/reconciler/openshift/tektonhub/extension.go
index 9432d9609c..9b3da36f4a 100644
--- a/pkg/reconciler/openshift/tektonhub/extension.go
+++ b/pkg/reconciler/openshift/tektonhub/extension.go
@@ -357,6 +357,10 @@ func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent)
return nil
}
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
+
// Get the Host value of the Route created
func getRouteHost(manifest *mf.Manifest, routeName string) (string, error) {
var hostUrl string
diff --git a/pkg/reconciler/openshift/tektonmulticlusterproxyaae/extension.go b/pkg/reconciler/openshift/tektonmulticlusterproxyaae/extension.go
index 42ca5a3c40..0e95a61a9c 100644
--- a/pkg/reconciler/openshift/tektonmulticlusterproxyaae/extension.go
+++ b/pkg/reconciler/openshift/tektonmulticlusterproxyaae/extension.go
@@ -57,3 +57,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/tektonpipeline/extension.go b/pkg/reconciler/openshift/tektonpipeline/extension.go
index 7b027e0fdd..0e6d3322d3 100644
--- a/pkg/reconciler/openshift/tektonpipeline/extension.go
+++ b/pkg/reconciler/openshift/tektonpipeline/extension.go
@@ -134,6 +134,10 @@ func (oe openshiftExtension) Finalize(ctx context.Context, comp v1alpha1.TektonC
return nil
}
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
+
func preManifest() (*mf.Manifest, error) {
koDataDir := os.Getenv(common.KoEnvKey)
manifest := &mf.Manifest{}
diff --git a/pkg/reconciler/openshift/tektonpruner/extension.go b/pkg/reconciler/openshift/tektonpruner/extension.go
index 0d96343e51..a7b9744c57 100644
--- a/pkg/reconciler/openshift/tektonpruner/extension.go
+++ b/pkg/reconciler/openshift/tektonpruner/extension.go
@@ -46,3 +46,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/tektonresult/controller.go b/pkg/reconciler/openshift/tektonresult/controller.go
index eb2914db86..f9716446f0 100644
--- a/pkg/reconciler/openshift/tektonresult/controller.go
+++ b/pkg/reconciler/openshift/tektonresult/controller.go
@@ -19,9 +19,10 @@ package tektonresult
import (
"context"
- k8s_ctrl "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektonresult"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
+
+ k8s_ctrl "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektonresult"
)
// NewController initializes the controller and is called by the generated code
diff --git a/pkg/reconciler/openshift/tektonresult/extension.go b/pkg/reconciler/openshift/tektonresult/extension.go
index c432b8c094..5828cbe1ba 100644
--- a/pkg/reconciler/openshift/tektonresult/extension.go
+++ b/pkg/reconciler/openshift/tektonresult/extension.go
@@ -18,21 +18,24 @@ package tektonresult
import (
"context"
+ "fmt"
"os"
"path/filepath"
"strings"
mf "github.com/manifestival/manifestival"
- "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
- operatorclient "github.com/tektoncd/operator/pkg/client/injection/client"
- "github.com/tektoncd/operator/pkg/reconciler/common"
- "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektoninstallerset/client"
- occommon "github.com/tektoncd/operator/pkg/reconciler/openshift/common"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"knative.dev/pkg/logging"
+
+ "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
+ operatorclient "github.com/tektoncd/operator/pkg/client/injection/client"
+ tektonConfiginformer "github.com/tektoncd/operator/pkg/client/injection/informers/operator/v1alpha1/tektonconfig"
+ "github.com/tektoncd/operator/pkg/reconciler/common"
+ "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektoninstallerset/client"
+ occommon "github.com/tektoncd/operator/pkg/reconciler/openshift/common"
)
const (
@@ -71,11 +74,15 @@ func OpenShiftExtension(ctx context.Context) common.Extension {
logger.Fatalf("Failed to fetch logs RBAC manifest: %v", err)
}
+ // Get TektonConfig lister to check EnableCentralTLSConfig flag
+ tektonConfigLister := tektonConfiginformer.Get(ctx).Lister()
+
ext := &openshiftExtension{
installerSetClient: client.NewInstallerSetClient(operatorclient.Get(ctx).OperatorV1alpha1().TektonInstallerSets(),
version, "results-ext", v1alpha1.KindTektonResult, nil),
- routeManifest: routeManifest,
- logsRBACManifest: logsRBACManifest,
+ routeManifest: routeManifest,
+ logsRBACManifest: logsRBACManifest,
+ tektonConfigLister: tektonConfigLister,
}
return ext
}
@@ -84,12 +91,14 @@ type openshiftExtension struct {
installerSetClient *client.InstallerSetClient
routeManifest *mf.Manifest
logsRBACManifest *mf.Manifest
+ tektonConfigLister occommon.TektonConfigLister
+ resolvedTLSConfig *occommon.TLSEnvVars
}
-func (oe openshiftExtension) Transformers(comp v1alpha1.TektonComponent) []mf.Transformer {
+func (oe *openshiftExtension) Transformers(comp v1alpha1.TektonComponent) []mf.Transformer {
instance := comp.(*v1alpha1.TektonResult)
- return []mf.Transformer{
+ transformers := []mf.Transformer{
occommon.RemoveRunAsUser(),
occommon.RemoveRunAsGroup(),
occommon.ApplyCABundlesToDeployment,
@@ -101,18 +110,44 @@ func (oe openshiftExtension) Transformers(comp v1alpha1.TektonComponent) []mf.Tr
injectResultsAPIServiceCACert(instance.Spec.ResultsAPIProperties),
injectPostgresUpgradeSupport(),
}
+
+	// Inject TLS env vars resolved in PreReconcile (nil when central TLS config is disabled)
+ if oe.resolvedTLSConfig != nil {
+ transformers = append(transformers, occommon.InjectTLSEnvVars(oe.resolvedTLSConfig, "Deployment", deploymentAPI, []string{apiContainerName}))
+ }
+
+ return transformers
+}
+
+// GetPlatformData returns a fingerprint of the resolved TLS configuration,
+// so the installer set hash (and thus the installer set) changes whenever the TLS config changes.
+func (oe *openshiftExtension) GetPlatformData() string {
+ if oe.resolvedTLSConfig == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s:%s:%s", oe.resolvedTLSConfig.MinVersion, oe.resolvedTLSConfig.CipherSuites, oe.resolvedTLSConfig.CurvePreferences)
}
func (oe *openshiftExtension) PreReconcile(ctx context.Context, tc v1alpha1.TektonComponent) error {
+ logger := logging.FromContext(ctx)
result := tc.(*v1alpha1.TektonResult)
- mf := mf.Manifest{}
+ manifest := mf.Manifest{}
if (result.Spec.LokiStackName != "" && result.Spec.LokiStackNamespace != "") ||
strings.EqualFold(result.Spec.LogsType, "LOKI") {
- mf = mf.Append(*oe.logsRBACManifest)
+ manifest = manifest.Append(*oe.logsRBACManifest)
+ }
+
+ resolvedTLS, err := occommon.ResolveCentralTLSToEnvVars(ctx, oe.tektonConfigLister)
+ if err != nil {
+ return err
+ }
+ oe.resolvedTLSConfig = resolvedTLS
+ if oe.resolvedTLSConfig != nil {
+ logger.Infof("Injecting central TLS config: MinVersion=%s", oe.resolvedTLSConfig.MinVersion)
}
- return oe.installerSetClient.PreSet(ctx, tc, &mf, filterAndTransform())
+ return oe.installerSetClient.PreSet(ctx, tc, &manifest, filterAndTransform())
}
func (oe openshiftExtension) PostReconcile(ctx context.Context, tc v1alpha1.TektonComponent) error {
diff --git a/pkg/reconciler/openshift/tektonscheduler/extension.go b/pkg/reconciler/openshift/tektonscheduler/extension.go
index 61fa75fddc..b5554ff801 100644
--- a/pkg/reconciler/openshift/tektonscheduler/extension.go
+++ b/pkg/reconciler/openshift/tektonscheduler/extension.go
@@ -46,3 +46,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/pkg/reconciler/openshift/tektontrigger/extension.go b/pkg/reconciler/openshift/tektontrigger/extension.go
index 6ddf9eb547..5903c162a7 100644
--- a/pkg/reconciler/openshift/tektontrigger/extension.go
+++ b/pkg/reconciler/openshift/tektontrigger/extension.go
@@ -67,3 +67,7 @@ func (oe openshiftExtension) PostReconcile(context.Context, v1alpha1.TektonCompo
func (oe openshiftExtension) Finalize(context.Context, v1alpha1.TektonComponent) error {
return nil
}
+
+func (oe openshiftExtension) GetPlatformData() string {
+ return ""
+}
diff --git a/vendor/github.com/evanphx/json-patch/.gitignore b/vendor/github.com/evanphx/json-patch/.gitignore
new file mode 100644
index 0000000000..b7ed7f956d
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/.gitignore
@@ -0,0 +1,6 @@
+# editor and IDE paraphernalia
+.idea
+.vscode
+
+# macOS paraphernalia
+.DS_Store
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 0000000000..df76d7d771
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md
new file mode 100644
index 0000000000..97e319b21b
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/README.md
@@ -0,0 +1,317 @@
+# JSON-Patch
+`jsonpatch` is a library which provides functionality for both applying
+[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
+well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
+
+[](http://godoc.org/github.com/evanphx/json-patch)
+[](https://github.com/evanphx/json-patch/actions/workflows/go.yml)
+[](https://goreportcard.com/report/github.com/evanphx/json-patch)
+
+# Get It!
+
+**Latest and greatest**:
+```bash
+go get -u github.com/evanphx/json-patch/v5
+```
+
+**Stable Versions**:
+* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
+* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+
+(previous versions below `v3` are unavailable)
+
+# Use It!
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
+* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
+* [Comparing JSON documents](#comparing-json-documents)
+* [Combine merge patches](#combine-merge-patches)
+
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+ This defaults to `true` and enables the non-standard practice of allowing
+ negative indices to mean indices starting at the end of an array. This
+ functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+ false`.
+
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+ which limits the total size increase in bytes caused by "copy" operations in a
+ patch. It defaults to 0, which means there is no limit.
+
+These global variables control the behavior of `jsonpatch.Apply`.
+
+An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
+is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
+
+Structure `jsonpatch.ApplyOptions` includes the configuration options above
+and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
+
+When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
+`remove` operations whose `path` points to a non-existent location in the JSON document.
+`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions`
+returning an error when hitting a missing `path` on `remove`.
+
+When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
+that `add` operations produce all the `path` elements that are missing from the target object.
+
+Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
+whose values are populated from the global configuration variables.
+
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
+
+It can describe the changes needed to convert from the original to the
+modified JSON document.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ // Let's create a merge patch from these two documents...
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ target := []byte(`{"name": "Jane", "age": 24}`)
+
+ patch, err := jsonpatch.CreateMergePatch(original, target)
+ if err != nil {
+ panic(err)
+ }
+
+ // Now lets apply the patch against a different JSON document...
+
+ alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+ modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+ fmt.Printf("patch document: %s\n", patch)
+ fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When ran, you get the following output:
+
+```bash
+$ go run main.go
+patch document: {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ patchJSON := []byte(`[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+ ]`)
+
+ patch, err := jsonpatch.DecodePatch(patchJSON)
+ if err != nil {
+ panic(err)
+ }
+
+ modified, err := patch.Apply(original)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Original document: %s\n", original)
+ fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When ran, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly.
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences, and key-value ordering.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+ similar := []byte(`
+ {
+ "age": 24,
+ "height": 3.21,
+ "name": "John"
+ }
+ `)
+ different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+ if jsonpatch.Equal(original, similar) {
+ fmt.Println(`"original" is structurally equal to "similar"`)
+ }
+
+ if !jsonpatch.Equal(original, different) {
+ fmt.Println(`"original" is _not_ structurally equal to "different"`)
+ }
+}
+```
+
+When ran, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "different"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a
+single merge patch which can describe both set of changes.
+
+The resulting merge patch can be used such that applying it results in a
+document structurally similar as merging each merge patch to the document
+in succession.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+ nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+ ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+ // Let's combine these merge patch documents...
+ combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+ // Apply each patch individual against the original document
+ withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+ if err != nil {
+ panic(err)
+ }
+
+ withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+ if err != nil {
+ panic(err)
+ }
+
+ // Apply the combined patch against the original document
+
+ withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+ if err != nil {
+ panic(err)
+ }
+
+ // Do both result in the same thing? They should!
+ if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+ fmt.Println("Both JSON documents are structurally the same!")
+ }
+
+ fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When ran, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the commandline program `json-patch`.
+
+This program can take multiple JSON patch documents as arguments,
+and fed a JSON document from `stdin`. It will apply the patch(es) against
+the document and output the modified doc.
+
+**patch.1.json**
+```json
+[
+ {"op": "replace", "path": "/name", "value": "Jane"},
+ {"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+ {"op": "add", "path": "/address", "value": "123 Main St"},
+ {"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+ "name": "John",
+ "age": 24,
+ "height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically
+using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml).
diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go
new file mode 100644
index 0000000000..75304b4437
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+ limit int64
+ accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+ return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+ return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+ limit int
+ size int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+ return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+ return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go
new file mode 100644
index 0000000000..ad88d40181
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/merge.go
@@ -0,0 +1,389 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+ curDoc, err := cur.intoDoc()
+
+ if err != nil {
+ pruneNulls(patch)
+ return patch
+ }
+
+ patchDoc, err := patch.intoDoc()
+
+ if err != nil {
+ return patch
+ }
+
+ mergeDocs(curDoc, patchDoc, mergeMerge)
+
+ return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+ for k, v := range *patch {
+ if v == nil {
+ if mergeMerge {
+ (*doc)[k] = nil
+ } else {
+ delete(*doc, k)
+ }
+ } else {
+ cur, ok := (*doc)[k]
+
+ if !ok || cur == nil {
+ if !mergeMerge {
+ pruneNulls(v)
+ }
+
+ (*doc)[k] = v
+ } else {
+ (*doc)[k] = merge(cur, v, mergeMerge)
+ }
+ }
+ }
+}
+
+func pruneNulls(n *lazyNode) {
+ sub, err := n.intoDoc()
+
+ if err == nil {
+ pruneDocNulls(sub)
+ } else {
+ ary, err := n.intoAry()
+
+ if err == nil {
+ pruneAryNulls(ary)
+ }
+ }
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+ for k, v := range *doc {
+ if v == nil {
+ delete(*doc, k)
+ } else {
+ pruneNulls(v)
+ }
+ }
+
+ return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+ newAry := []*lazyNode{}
+
+ for _, v := range *ary {
+ if v != nil {
+ pruneNulls(v)
+ }
+ newAry = append(newAry, v)
+ }
+
+ *ary = newAry
+
+ return ary
+}
+
+var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+ return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+ return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+ doc := &partialDoc{}
+
+ docErr := json.Unmarshal(docData, doc)
+
+ patch := &partialDoc{}
+
+ patchErr := json.Unmarshal(patchData, patch)
+
+ if _, ok := docErr.(*json.SyntaxError); ok {
+ return nil, ErrBadJSONDoc
+ }
+
+ if _, ok := patchErr.(*json.SyntaxError); ok {
+ return nil, ErrBadJSONPatch
+ }
+
+ if docErr == nil && *doc == nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ if patchErr == nil && *patch == nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ if docErr != nil || patchErr != nil {
+ // Not an error, just not a doc, so we turn straight into the patch
+ if patchErr == nil {
+ if mergeMerge {
+ doc = patch
+ } else {
+ doc = pruneDocNulls(patch)
+ }
+ } else {
+ patchAry := &partialArray{}
+ patchErr = json.Unmarshal(patchData, patchAry)
+
+ if patchErr != nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ pruneAryNulls(patchAry)
+
+ out, patchErr := json.Marshal(patchAry)
+
+ if patchErr != nil {
+ return nil, ErrBadJSONPatch
+ }
+
+ return out, nil
+ }
+ } else {
+ mergeDocs(doc, patch, mergeMerge)
+ }
+
+ return json.Marshal(doc)
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+ input = bytes.TrimSpace(input)
+
+ hasPrefix := bytes.HasPrefix(input, []byte("["))
+ hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+ return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalResemblesArray := resemblesJSONArray(originalJSON)
+ modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+ // Do both byte-slices seem like JSON arrays?
+ if originalResemblesArray && modifiedResemblesArray {
+ return createArrayMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // Are both byte-slices are not arrays? Then they are likely JSON objects...
+ if !originalResemblesArray && !modifiedResemblesArray {
+ return createObjectMergePatch(originalJSON, modifiedJSON)
+ }
+
+ // None of the above? Then return an error because of mismatched types.
+ return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDoc := map[string]interface{}{}
+ modifiedDoc := map[string]interface{}{}
+
+ err := json.Unmarshal(originalJSON, &originalDoc)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ dest, err := getDiff(originalDoc, modifiedDoc)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+ originalDocs := []json.RawMessage{}
+ modifiedDocs := []json.RawMessage{}
+
+ err := json.Unmarshal(originalJSON, &originalDocs)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+ if err != nil {
+ return nil, ErrBadJSONDoc
+ }
+
+ total := len(originalDocs)
+ if len(modifiedDocs) != total {
+ return nil, ErrBadJSONDoc
+ }
+
+ result := []json.RawMessage{}
+ for i := 0; i < len(originalDocs); i++ {
+ original := originalDocs[i]
+ modified := modifiedDocs[i]
+
+ patch, err := createObjectMergePatch(original, modified)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, json.RawMessage(patch))
+ }
+
+ return json.Marshal(result)
+}
+
+// Returns true if the array matches (must be json types).
+// As is idiomatic for go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ if (a == nil && b != nil) || (a != nil && b == nil) {
+ return false
+ }
+ for i := range a {
+ if !matchesValue(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Returns true if the values matches (must be json types)
+// The types of the values must match, otherwise it will always return false
+// If two map[string]interface{} are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ return false
+ }
+ switch at := av.(type) {
+ case string:
+ bt := bv.(string)
+ if bt == at {
+ return true
+ }
+ case float64:
+ bt := bv.(float64)
+ if bt == at {
+ return true
+ }
+ case bool:
+ bt := bv.(bool)
+ if bt == at {
+ return true
+ }
+ case nil:
+ // Both nil, fine.
+ return true
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ if len(bt) != len(at) {
+ return false
+ }
+ for key := range bt {
+ av, aOK := at[key]
+ bv, bOK := bt[key]
+ if aOK != bOK {
+ return false
+ }
+ if !matchesValue(av, bv) {
+ return false
+ }
+ }
+ return true
+ case []interface{}:
+ bt := bv.([]interface{})
+ return matchesArray(at, bt)
+ }
+ return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+ into := map[string]interface{}{}
+ for key, bv := range b {
+ av, ok := a[key]
+ // value was added
+ if !ok {
+ into[key] = bv
+ continue
+ }
+ // If types have changed, replace completely
+ if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+ into[key] = bv
+ continue
+ }
+ // Types are the same, compare values
+ switch at := av.(type) {
+ case map[string]interface{}:
+ bt := bv.(map[string]interface{})
+ dst := make(map[string]interface{}, len(bt))
+ dst, err := getDiff(at, bt)
+ if err != nil {
+ return nil, err
+ }
+ if len(dst) > 0 {
+ into[key] = dst
+ }
+ case string, float64, bool:
+ if !matchesValue(av, bv) {
+ into[key] = bv
+ }
+ case []interface{}:
+ bt := bv.([]interface{})
+ if !matchesArray(at, bt) {
+ into[key] = bv
+ }
+ case nil:
+ switch bv.(type) {
+ case nil:
+ // Both nil, fine.
+ default:
+ into[key] = bv
+ }
+ default:
+ panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+ }
+ }
+ // Now add all deleted values as nil
+ for key := range a {
+ _, found := b[key]
+ if !found {
+ into[key] = nil
+ }
+ }
+ return into, nil
+}
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
new file mode 100644
index 0000000000..cd0274e1e4
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -0,0 +1,851 @@
+package jsonpatch
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ eRaw = iota
+ eDoc
+ eAry
+)
+
+var (
+ // SupportNegativeIndices decides whether to support non-standard practice of
+ // allowing negative indices to mean indices starting at the end of an array.
+ // Default to true.
+ SupportNegativeIndices bool = true
+ // AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+ // "copy" operations in a patch.
+ AccumulatedCopySizeLimit int64 = 0
+)
+
+var (
+ ErrTestFailed = errors.New("test failed")
+ ErrMissing = errors.New("missing value")
+ ErrUnknownType = errors.New("unknown object type")
+ ErrInvalid = errors.New("invalid state detected")
+ ErrInvalidIndex = errors.New("invalid index referenced")
+)
+
+type lazyNode struct {
+ raw *json.RawMessage
+ doc partialDoc
+ ary partialArray
+ which int
+}
+
+// Operation is a single JSON-Patch step, such as a single 'add' operation.
+type Operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of Operations.
+type Patch []Operation
+
+type partialDoc map[string]*lazyNode
+type partialArray []*lazyNode
+
+type container interface {
+ get(key string) (*lazyNode, error)
+ set(key string, val *lazyNode) error
+ add(key string, val *lazyNode) error
+ remove(key string) error
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+ return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+ switch n.which {
+ case eRaw:
+ return json.Marshal(n.raw)
+ case eDoc:
+ return json.Marshal(n.doc)
+ case eAry:
+ return json.Marshal(n.ary)
+ default:
+ return nil, ErrUnknownType
+ }
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+ dest := make(json.RawMessage, len(data))
+ copy(dest, data)
+ n.raw = &dest
+ n.which = eRaw
+ return nil
+}
+
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+ if src == nil {
+ return nil, 0, nil
+ }
+ a, err := src.MarshalJSON()
+ if err != nil {
+ return nil, 0, err
+ }
+ sz := len(a)
+ ra := make(json.RawMessage, sz)
+ copy(ra, a)
+ return newLazyNode(&ra), sz, nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+ if n.which == eDoc {
+ return &n.doc, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eDoc
+ return &n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+ if n.which == eAry {
+ return &n.ary, nil
+ }
+
+ if n.raw == nil {
+ return nil, ErrInvalid
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return nil, err
+ }
+
+ n.which = eAry
+ return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+ buf := &bytes.Buffer{}
+
+ if n.raw == nil {
+ return nil
+ }
+
+ err := json.Compact(buf, *n.raw)
+
+ if err != nil {
+ return *n.raw
+ }
+
+ return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.doc)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eDoc
+ return true
+}
+
+func (n *lazyNode) tryAry() bool {
+ if n.raw == nil {
+ return false
+ }
+
+ err := json.Unmarshal(*n.raw, &n.ary)
+
+ if err != nil {
+ return false
+ }
+
+ n.which = eAry
+ return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+ if n.which == eRaw {
+ if !n.tryDoc() && !n.tryAry() {
+ if o.which != eRaw {
+ return false
+ }
+
+ return bytes.Equal(n.compact(), o.compact())
+ }
+ }
+
+ if n.which == eDoc {
+ if o.which == eRaw {
+ if !o.tryDoc() {
+ return false
+ }
+ }
+
+ if o.which != eDoc {
+ return false
+ }
+
+ if len(n.doc) != len(o.doc) {
+ return false
+ }
+
+ for k, v := range n.doc {
+ ov, ok := o.doc[k]
+
+ if !ok {
+ return false
+ }
+
+ if (v == nil) != (ov == nil) {
+ return false
+ }
+
+ if v == nil && ov == nil {
+ continue
+ }
+
+ if !v.equal(ov) {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ if o.which != eAry && !o.tryAry() {
+ return false
+ }
+
+ if len(n.ary) != len(o.ary) {
+ return false
+ }
+
+ for idx, val := range n.ary {
+ if !val.equal(o.ary[idx]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Kind reads the "op" field of the Operation.
+func (o Operation) Kind() string {
+ if obj, ok := o["op"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown"
+ }
+
+ return op
+ }
+
+ return "unknown"
+}
+
+// Path reads the "path" field of the Operation.
+func (o Operation) Path() (string, error) {
+ if obj, ok := o["path"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+}
+
+// From reads the "from" field of the Operation.
+func (o Operation) From() (string, error) {
+ if obj, ok := o["from"]; ok && obj != nil {
+ var op string
+
+ err := json.Unmarshal(*obj, &op)
+
+ if err != nil {
+ return "unknown", err
+ }
+
+ return op, nil
+ }
+
+ return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+}
+
+func (o Operation) value() *lazyNode {
+ if obj, ok := o["value"]; ok {
+ return newLazyNode(obj)
+ }
+
+ return nil
+}
+
+// ValueInterface decodes the operation value into an interface.
+func (o Operation) ValueInterface() (interface{}, error) {
+ if obj, ok := o["value"]; ok && obj != nil {
+ var v interface{}
+
+ err := json.Unmarshal(*obj, &v)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+}
+
+func isArray(buf []byte) bool {
+Loop:
+ for _, c := range buf {
+ switch c {
+ case ' ':
+ case '\n':
+ case '\t':
+ continue
+ case '[':
+ return true
+ default:
+ break Loop
+ }
+ }
+
+ return false
+}
+
+func findObject(pd *container, path string) (container, string) {
+ doc := *pd
+
+ split := strings.Split(path, "/")
+
+ if len(split) < 2 {
+ return nil, ""
+ }
+
+ parts := split[1 : len(split)-1]
+
+ key := split[len(split)-1]
+
+ var err error
+
+ for _, part := range parts {
+
+ next, ok := doc.get(decodePatchKey(part))
+
+ if next == nil || ok != nil || next.raw == nil {
+ return nil, ""
+ }
+
+ if isArray(*next.raw) {
+ doc, err = next.intoAry()
+
+ if err != nil {
+ return nil, ""
+ }
+ } else {
+ doc, err = next.intoDoc()
+
+ if err != nil {
+ return nil, ""
+ }
+ }
+ }
+
+ return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode) error {
+ (*d)[key] = val
+ return nil
+}
+
+func (d *partialDoc) get(key string) (*lazyNode, error) {
+ return (*d)[key], nil
+}
+
+func (d *partialDoc) remove(key string) error {
+ _, ok := (*d)[key]
+ if !ok {
+ return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+ }
+
+ delete(*d, key)
+ return nil
+}
+
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
+func (d *partialArray) set(key string, val *lazyNode) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ (*d)[idx] = val
+ return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode) error {
+ if key == "-" {
+ *d = append(*d, val)
+ return nil
+ }
+
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ }
+
+ sz := len(*d) + 1
+
+ ary := make([]*lazyNode, sz)
+
+ cur := *d
+
+ if idx >= len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(ary) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(ary)
+ }
+
+ copy(ary[0:idx], cur[0:idx])
+ ary[idx] = val
+ copy(ary[idx+1:], cur[idx:])
+
+ *d = ary
+ return nil
+}
+
+func (d *partialArray) get(key string) (*lazyNode, error) {
+ idx, err := strconv.Atoi(key)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(*d)
+ }
+
+ if idx >= len(*d) {
+ return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string) error {
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ return err
+ }
+
+ cur := *d
+
+ if idx >= len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+
+ if idx < 0 {
+ if !SupportNegativeIndices {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ if idx < -len(cur) {
+ return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ }
+ idx += len(cur)
+ }
+
+ ary := make([]*lazyNode, len(cur)-1)
+
+ copy(ary[0:idx], cur[0:idx])
+ copy(ary[idx:], cur[idx+1:])
+
+ *d = ary
+ return nil
+
+}
+
+func (p Patch) add(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.add(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in add for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) remove(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) replace(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "replace operation failed to decode path")
+ }
+
+ if path == "" {
+ val := op.value()
+
+ if val.which == eRaw {
+ if !val.tryDoc() {
+ if !val.tryAry() {
+ return errors.Wrapf(err, "replace operation value must be object or array")
+ }
+ }
+ }
+
+ switch val.which {
+ case eAry:
+ *doc = &val.ary
+ case eDoc:
+ *doc = &val.doc
+ case eRaw:
+ return errors.Wrapf(err, "replace operation hit impossible case")
+ }
+
+ return nil
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ }
+
+ _, ok := con.get(key)
+ if ok != nil {
+ return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ }
+
+ err = con.set(key, op.value())
+ if err != nil {
+ return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) move(doc *container, op Operation) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ err = con.remove(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", key)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "move operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ err = con.add(key, val)
+ if err != nil {
+ return errors.Wrapf(err, "error in move for path: '%s'", path)
+ }
+
+ return nil
+}
+
+func (p Patch) test(doc *container, op Operation) error {
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(err, "test operation failed to decode path")
+ }
+
+ if path == "" {
+ var self lazyNode
+
+ switch sv := (*doc).(type) {
+ case *partialDoc:
+ self.doc = *sv
+ self.which = eDoc
+ case *partialArray:
+ self.ary = *sv
+ self.which = eAry
+ }
+
+ if self.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ con, key := findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in test for path: '%s'", path)
+ }
+
+ if val == nil {
+ if op.value() == nil || op.value().raw == nil {
+ return nil
+ }
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ } else if op.value() == nil {
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ }
+
+ if val.equal(op.value()) {
+ return nil
+ }
+
+ return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
+ from, err := op.From()
+ if err != nil {
+ return errors.Wrapf(err, "copy operation failed to decode from")
+ }
+
+ con, key := findObject(doc, from)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ }
+
+ val, err := con.get(key)
+ if err != nil {
+ return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ }
+
+ path, err := op.Path()
+ if err != nil {
+ return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ }
+
+ con, key = findObject(doc, path)
+
+ if con == nil {
+ return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ }
+
+ valCopy, sz, err := deepCopy(val)
+ if err != nil {
+ return errors.Wrapf(err, "error while performing deep copy")
+ }
+
+ (*accumulatedCopySize) += int64(sz)
+ if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
+ return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
+ }
+
+ err = con.add(key, valCopy)
+ if err != nil {
+ return errors.Wrapf(err, "error while adding value during copy")
+ }
+
+ return nil
+}
+
+// Equal indicates if 2 JSON documents have the same structural equality.
+func Equal(a, b []byte) bool {
+ ra := make(json.RawMessage, len(a))
+ copy(ra, a)
+ la := newLazyNode(&ra)
+
+ rb := make(json.RawMessage, len(b))
+ copy(rb, b)
+ lb := newLazyNode(&rb)
+
+ return la.equal(lb)
+}
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+ var p Patch
+
+ err := json.Unmarshal(buf, &p)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+ return p.ApplyIndent(doc, "")
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+ if len(doc) == 0 {
+ return doc, nil
+ }
+
+ var pd container
+ if doc[0] == '[' {
+ pd = &partialArray{}
+ } else {
+ pd = &partialDoc{}
+ }
+
+ err := json.Unmarshal(doc, pd)
+
+ if err != nil {
+ return nil, err
+ }
+
+ err = nil
+
+ var accumulatedCopySize int64
+
+ for _, op := range p {
+ switch op.Kind() {
+ case "add":
+ err = p.add(&pd, op)
+ case "remove":
+ err = p.remove(&pd, op)
+ case "replace":
+ err = p.replace(&pd, op)
+ case "move":
+ err = p.move(&pd, op)
+ case "test":
+ err = p.test(&pd, op)
+ case "copy":
+ err = p.copy(&pd, op, &accumulatedCopySize)
+ default:
+ err = fmt.Errorf("Unexpected kind: %s", op.Kind())
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if indent != "" {
+ return json.MarshalIndent(pd, "", indent)
+ }
+
+ return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence. This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
+
+var (
+ rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+ return rfc6901Decoder.Replace(k)
+}
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000000..e256a31e00
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000000..0e9d6edc01
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+ - 1.3
+ - 1.4
+script:
+ - go test
+ - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000000..7805d36de7
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000000..0200f75b4d
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000000..5860074026
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000000..4fb4054a8b
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
new file mode 100644
index 0000000000..8a0681af85
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 0000000000..529c3412ba
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 0000000000..dad29725f8
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - go test -race -v ./...
+after_script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..469b44907a
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000000..686680298d
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000000..876abb500a
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,247 @@
+# Mergo
+
+
+[![GoDoc][3]][4]
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Build Status][1]][2]
+[![Coverage Status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA Status][13]][14]
+
+[![GoCenter Kudos][15]][16]
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
+[16]: https://search.gocenter.io/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+### Important note
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+
+[](https://beerpay.io/imdario/mergo)
+[](https://beerpay.io/imdario/mergo)
+
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [janoszen/containerssh](https://github.com/janoszen/containerssh)
+
+## Install
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+Also, you can merge overwriting values using the transformer `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v2
+
+### Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## Top Contributors
+
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
+[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
+
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000000..fcd985f995
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,143 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+ go get github.com/imdario/mergo
+
+ // use in your .go code
+ import (
+ "github.com/imdario/mergo"
+ )
+
+Usage
+
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+Transformers
+
+Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/imdario/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
+ }
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, as Go language.
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000000..a13a7ee46c
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ config.overwriteWithEmptyValue = true
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Missing key in src that doesn't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is separated method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000000..afa84a1e29
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,375 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
+type Config struct {
+ Overwrite bool
+ AppendSlice bool
+ TypeCheck bool
+ Transformers Transformers
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
+}
+
+type Transformers interface {
+ Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{addr, typ, seen}
+ }
+
+ if config.Transformers != nil && !isEmptyValue(dst) {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasMergeableFields(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite {
+ dst.Set(src)
+ }
+ return
+ }
+
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+ if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
+ continue
+ }
+
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
+ }
+ default:
+ mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
+ }
+ }
+
+ return
+}
+
+// Merge will fill any empty for value type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting it.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerAgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// IsReflectNil is the reflect value provided nil
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000000..3cc926c7f6
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,78 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs and maps are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+ ErrNonPointerAgument = errors.New("dst must be a pointer")
+)
+
+// During deepMerge, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited are stored in a map indexed by 17 * a1 + a2;
+type visit struct {
+ ptr uintptr
+ typ reflect.Type
+ next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return true
+ }
+ return isEmptyValue(v.Elem())
+ case reflect.Func:
+ return v.IsNil()
+ case reflect.Invalid:
+ return true
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml
new file mode 100644
index 0000000000..1e59c02c25
--- /dev/null
+++ b/vendor/github.com/openshift/api/.ci-operator.yaml
@@ -0,0 +1,4 @@
+build_root_image:
+ name: release
+ namespace: openshift
+ tag: rhel-9-release-golang-1.22-openshift-4.17
diff --git a/vendor/github.com/openshift/api/.gitattributes b/vendor/github.com/openshift/api/.gitattributes
new file mode 100644
index 0000000000..124067fe73
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitattributes
@@ -0,0 +1,7 @@
+# Set unix LF EOL for shell scripts
+*.sh text eol=lf
+
+**/zz_generated.*.go linguist-generated=true
+**/types.generated.go linguist-generated=true
+**/generated.pb.go linguist-generated=true
+**/generated.proto linguist-generated=true
diff --git a/vendor/github.com/openshift/api/.gitignore b/vendor/github.com/openshift/api/.gitignore
new file mode 100644
index 0000000000..1c3e4625d3
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+.idea/
+_output/
+tests/bin/
+
+models-schema
+/render
+/write-available-featuresets
diff --git a/vendor/github.com/openshift/api/Dockerfile.rhel8 b/vendor/github.com/openshift/api/Dockerfile.rhel8
new file mode 100644
index 0000000000..d4f61d8787
--- /dev/null
+++ b/vendor/github.com/openshift/api/Dockerfile.rhel8
@@ -0,0 +1,23 @@
+FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.17 AS builder
+WORKDIR /go/src/github.com/openshift/api
+COPY . .
+ENV GO_PACKAGE github.com/openshift/api
+RUN make build --warn-undefined-variables
+
+FROM registry.ci.openshift.org/ocp/4.16:base-rhel9
+
+# copy the built binaries to /usr/bin
+COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/
+COPY --from=builder /go/src/github.com/openshift/api/write-available-featuresets /usr/bin/
+
+# this directory is used to produce rendered manifests that the installer applies (but does not maintain) in bootkube
+RUN mkdir -p /usr/share/bootkube/manifests/manifests
+COPY payload-manifests/crds/* /usr/share/bootkube/manifests/manifests
+
+# these are applied by the CVO
+COPY manifests /manifests
+COPY payload-manifests/crds/* /manifests
+COPY payload-manifests/featuregates/* /manifests
+COPY payload-command/empty-resources /manifests
+
+LABEL io.openshift.release.operator true
diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile
new file mode 100644
index 0000000000..7aa6b9bde4
--- /dev/null
+++ b/vendor/github.com/openshift/api/Makefile
@@ -0,0 +1,183 @@
+all: build
+.PHONY: all
+
+update: update-codegen-crds
+
+RUNTIME ?= podman
+RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.22-openshift-4.17
+
+EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/
+GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */))))
+
+.PHONY: test-unit
+test-unit:
+ go test -v $(GO_PACKAGES)
+
+##################################################################################
+#
+# BEGIN: Update codegen-crds. Defaults to generating updates for all API packages.
+# To run a subset of packages:
+# - Filter by group with make update-codegen-crds-
+# E.g. make update-codegen-crds-machine
+# - Set API_GROUP_VERSIONS to a space separated list of /.
+# E.g. API_GROUP_VERSIONS="apps/v1 build/v1" make update-codegen-crds.
+# FeatureSet generation is controlled at the group level by the
+# .codegen.yaml file.
+#
+##################################################################################
+
+# Ensure update-scripts are run before crd-gen so updates to Godoc are included in CRDs.
+.PHONY: update-codegen-crds
+update-codegen-crds: update-scripts
+ hack/update-codegen-crds.sh
+
+#####################
+#
+# END: Update Codegen
+#
+#####################
+
+.PHONY: verify-scripts
+verify-scripts:
+ bash -x hack/verify-deepcopy.sh
+ bash -x hack/verify-openapi.sh
+ bash -x hack/verify-protobuf.sh
+ bash -x hack/verify-swagger-docs.sh
+ hack/verify-crds.sh
+ bash -x hack/verify-types.sh
+ bash -x hack/verify-compatibility.sh
+ bash -x hack/verify-integration-tests.sh
+ bash -x hack/verify-group-versions.sh
+ bash -x hack/verify-prerelease-lifecycle-gen.sh
+ hack/verify-payload-crds.sh
+ hack/verify-payload-featuregates.sh
+ hack/verify-promoted-features-pass-tests.sh
+
+.PHONY: verify
+verify: verify-scripts verify-crd-schema verify-codegen-crds
+
+.PHONY: verify-codegen-crds
+verify-codegen-crds:
+ bash -x hack/verify-codegen-crds.sh
+
+.PHONY: verify-crd-schema
+verify-crd-schema:
+ bash -x hack/verify-crd-schema-checker.sh
+
+.PHONY: verify-%
+verify-%:
+ make $*
+ git diff --exit-code
+
+################################################################################################
+#
+# BEGIN: Update scripts. Defaults to generating updates for all API packages.
+# Set API_GROUP_VERSIONS to a space separated list of / to limit
+# the scope of the updates. Eg API_GROUP_VERSIONS="apps/v1 build/v1" make update-scripts.
+# Note: Protobuf generation is handled separately, see hack/lib/init.sh.
+#
+################################################################################################
+
+.PHONY: update-scripts
+update-scripts: update-compatibility update-openapi update-deepcopy update-protobuf update-swagger-docs tests-vendor update-prerelease-lifecycle-gen update-payload-crds update-payload-featuregates
+
+.PHONY: update-compatibility
+update-compatibility:
+ hack/update-compatibility.sh
+
+.PHONY: update-openapi
+update-openapi:
+ hack/update-openapi.sh
+
+.PHONY: update-deepcopy
+update-deepcopy:
+ hack/update-deepcopy.sh
+
+.PHONY: update-protobuf
+update-protobuf:
+ hack/update-protobuf.sh
+
+.PHONY: update-swagger-docs
+update-swagger-docs:
+ hack/update-swagger-docs.sh
+
+.PHONY: update-prerelease-lifecycle-gen
+update-prerelease-lifecycle-gen:
+ hack/update-prerelease-lifecycle-gen.sh
+
+.PHONY: update-payload-crds
+update-payload-crds:
+ hack/update-payload-crds.sh
+
+.PHONY: update-payload-featuregates
+update-payload-featuregates:
+ hack/update-payload-featuregates.sh
+
+#####################
+#
+# END: Update scripts
+#
+#####################
+
+deps:
+ go mod tidy
+ go mod vendor
+ go mod verify
+
+verify-with-container:
+ $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make verify
+
+generate-with-container:
+ $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make update
+
+.PHONY: integration
+integration:
+ make -C tests integration
+
+tests-vendor:
+ make -C tests vendor
+
+##################################
+#
+# BEGIN: Build binaries and images
+#
+##################################
+
+.PHONY: build
+build: render write-available-featuresets
+
+render:
+ go build --mod=vendor -trimpath github.com/openshift/api/payload-command/cmd/render
+
+write-available-featuresets:
+ go build --mod=vendor -trimpath github.com/openshift/api/payload-command/cmd/write-available-featuresets
+
+.PHONY: clean
+clean:
+ rm -f render write-available-featuresets models-schema
+ rm -rf tools/_output
+
+VERSION ?= $(shell git describe --always --abbrev=7)
+MUTABLE_TAG ?= latest
+IMAGE ?= registry.ci.openshift.org/openshift/api
+
+ifeq ($(shell command -v podman > /dev/null 2>&1 ; echo $$? ), 0)
+ ENGINE=podman
+else ifeq ($(shell command -v docker > /dev/null 2>&1 ; echo $$? ), 0)
+ ENGINE=docker
+endif
+
+USE_DOCKER ?= 0
+ifeq ($(USE_DOCKER), 1)
+ ENGINE=docker
+endif
+
+.PHONY: images
+images:
+ $(ENGINE) build -f Dockerfile.rhel8 -t "$(IMAGE):$(VERSION)" -t "$(IMAGE):$(MUTABLE_TAG)" ./
+
+################################
+#
+# END: Build binaries and images
+#
+################################
diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS
new file mode 100644
index 0000000000..2e956a47dd
--- /dev/null
+++ b/vendor/github.com/openshift/api/OWNERS
@@ -0,0 +1,19 @@
+reviewers:
+ - deads2k
+ - derekwaynecarr
+ - JoelSpeed
+ - knobunc
+ - sjenning
+ - mfojtik
+ - soltysh
+ - bparees
+approvers:
+ - bparees
+ - deads2k
+ - derekwaynecarr
+ - JoelSpeed
+ - knobunc
+ - mfojtik
+ - sjenning
+ - soltysh
+ - spadgett
diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md
new file mode 100644
index 0000000000..2054ba8151
--- /dev/null
+++ b/vendor/github.com/openshift/api/README.md
@@ -0,0 +1,333 @@
+# api
+The canonical location of the OpenShift API definition.
+This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go)
+APIs in this repo ship inside OCP payloads.
+
+## Adding new FeatureGates
+Add your FeatureGate to feature_gates.go.
+The threshold for merging a fully disabled or TechPreview FeatureGate is an open enhancement.
+To promote to Default on any ClusterProfile, the threshold is 99% passing tests on all platforms or QE sign off.
+
+### Adding new TechPreview FeatureGate to all ClusterProfiles (Hypershift and SelfManaged)
+```go
+FeatureGateMyFeatureName = newFeatureGate("MyFeatureName").
+ reportProblemsToJiraComponent("my-jira-component").
+ contactPerson("my-team-lead").
+ productScope(ocpSpecific).
+ enableIn(TechPreviewNoUpgrade).
+ mustRegister()
+```
+
+### Adding new TechPreview FeatureGate to only Hypershift
+This will be enabled in TechPreview on Hypershift, but never enabled on SelfManaged
+```go
+FeatureGateMyFeatureName = newFeatureGate("MyFeatureName").
+ reportProblemsToJiraComponent("my-jira-component").
+ contactPerson("my-team-lead").
+ productScope(ocpSpecific).
+ enableForClusterProfile(Hypershift, TechPreviewNoUpgrade).
+ mustRegister()
+```
+
+### Promoting to Default, but only on Hypershift
+This will be enabled in TechPreview on all ClusterProfiles and also by Default on Hypershift.
+It will be disabled in Default on SelfManaged.
+```go
+FeatureGateMyFeatureName = newFeatureGate("MyFeatureName").
+ reportProblemsToJiraComponent("my-jira-component").
+ contactPerson("my-team-lead").
+ productScope([ocpSpecific|kubernetes]).
+ enableIn(TechPreviewNoUpgrade).
+ enableForClusterProfile(Hypershift, Default).
+ mustRegister()
+```
+
+### Promoting to Default on all ClusterProfiles
+```go
+FeatureGateMyFeatureName = newFeatureGate("MyFeatureName").
+ reportProblemsToJiraComponent("my-jira-component").
+ contactPerson("my-team-lead").
+ productScope([ocpSpecific|kubernetes]).
+ enableIn(Default, TechPreviewNoUpgrade).
+ mustRegister()
+```
+
+### defining API validation tests
+Tests are logically associated with FeatureGates.
+When adding any FeatureGated functionality a new test file is required.
+The test files are located in `<group>/<version>/tests/<crd-name>/FeatureGate.yaml`:
+```
+route/
+ v1/
+ tests/
+ routes.route.openshift.io/
+ AAA_ungated.yaml
+ ExternalRouteCertificate.yaml
+```
+Here's an `AAA_ungated.yaml` example:
+```yaml
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this.
+name: Route
+crdName: routes.route.openshift.io
+tests:
+```
+
+Here's an `ExternalRouteCertificate.yaml` example:
+```yaml
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this.
+name: Route
+crdName: routes.route.openshift.io
+featureGate: ExternalRouteCertificate
+tests:
+```
+
+The integration tests use the crdName and featureGate to determine which tests apply to which manifests and automatically
+react to changes when the FeatureGates are enabled/disabled on various FeatureSets and ClusterProfiles.
+
+[`gen-minimal-test.sh`](tests/hack/gen-minimal-test.sh) can still function to stub out files if you don't want to
+copy/paste an existing one.
+
+### defining FeatureGate e2e tests
+
+In order to move an API into the `Default` FeatureSet, it is necessary to demonstrate completeness and reliability.
+E2E tests are the ONLY category of test that automatically prevents regression over time: repository presubmits do NOT provide equivalent protection.
+To confirm this, there is an automated verify script that runs every time a FeatureGate is added to the `Default` FeatureSet.
+The script queries our CI system (sippy/component readiness) to retrieve a list of all automated tests for a given FeatureGate
+and then enforces the following rules.
+1. Tests must contain either `[OCPFeatureGate:<FeatureGateName>]` or the standard upstream `[FeatureGate:<FeatureGateName>]`.
+2. There must be at least five tests for each FeatureGate.
+3. Every test must be run on every TechPreview platform we have jobs for. (Ask for an exception if your feature doesn't support a variant.)
+4. Every test must run at least 14 times on every platform/variant.
+5. Every test must pass at least 95% of the time on every platform/variant.
+
+If your FeatureGate lacks automated testing, there is an exception process that allows QE to sign off on the promotion by
+commenting on the PR.
+
+
+## defining new APIs
+
+When defining a new API, please follow [the OpenShift API
+conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#api),
+and then follow the instructions below to regenerate CRDs (if necessary) and
+submit a pull request with your new API definitions and generated files.
+
+### Adding a new stable API (v1)
+When copying, it matters which `// +foo` markers are two comments blocks up and which are one comment block up.
+
+```go
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// the next line of whitespace matters
+
+// MyAPI is amazing, let me describe it!
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=my-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=myapis,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/<this PR number>
+// +openshift:capability=IfYouHaveOne
+// +kubebuilder:printcolumn:name=Column Name,JSONPath=.status.something,type=string,description=how users should interpret this.
+// +kubebuilder:metadata:annotations=key=value
+// +kubebuilder:metadata:labels=key=value
+// +kubebuilder:validation:XValidation:rule=
+type MyAPI struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the desired state of the cluster version - the operator will work
+ // to ensure that the desired version is applied to the cluster.
+ // +kubebuilder:validation:Required
+ Spec MyAPISpec `json:"spec"`
+ // status contains information about the available updates and any in-progress
+ // updates.
+ // +optional
+ Status MyAPIStatus `json:"status"`
+}
+
+```
+
+### Adding a new unstable API (v1alpha)
+First, add a FeatureGate as described above.
+
+Like above, but there's an additional
+
+```go
+// +kubebuilder:validation:XValidation:rule=
+// +openshift:enable:FeatureGate=MyFeatureGate
+type MyAPI struct {
+ ...
+}
+```
+
+### Adding new fields
+Here are few other use-cases for convenience, but have a look in `./example` for other possibilities.
+
+
+```go
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=MyFeatureGate,rule="has(oldSelf.coolNewField) ? has(self.coolNewField) : true",message="coolNewField may not be removed once set"
+type MyAPI struct {
+ // +openshift:enable:FeatureGate=MyFeatureGate
+ // +optional
+ CoolNewField string `json:"coolNewField"`
+}
+
+// EvolvingDiscriminator defines the audit policy profile type.
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";StableValue
+// +openshift:validation:FeatureGateAwareEnum:featureGate=MyFeatureGate,enum="";StableValue;TechPreviewOnlyValue
+type EvolvingDiscriminator string
+
+const (
+ // "StableValue" is always present.
+ StableValue EvolvingDiscriminator = "StableValue"
+
+ // "TechPreviewOnlyValue" should only be allowed when TechPreviewNoUpgrade is set in the cluster
+ TechPreviewOnlyValue EvolvingDiscriminator = "TechPreviewOnlyValue"
+)
+
+```
+
+
+### required labels
+
+In addition to the standard `lgtm` and `approved` labels this repository requires either:
+
+`bugzilla/valid-bug` - applied if your PR references a valid bugzilla bug
+
+OR
+
+`qe-approved`, `docs-approved`, and `px-approved` - these labels can be applied by anyone in the openshift org via the `/label` command.
+
+Who should apply these qe/docs/px labels?
+- For a no-FF team who is merging a feature before code freeze, they need to get those labels applied to their api repo PR by the appropriate teams (i.e. qe, docs, px)
+- For a FF(traditional) team who is merging a feature before FF, they can self-apply the labels(via /label commands), they are basically irrelevant for those teams
+- For a FF team who is merging a feature after FF, the PR should be rejected barring an exception
+
+Why are these labels needed?
+
+We need a way for no-FF teams to be able to merge post-FF that does not require a BZ. For non-shared repos that mechanism is the
+qe/docs/px-approved labels. We are expanding that mechanism to shared repos because the alternative would be that no-FF teams would
+put a dummy `bugzilla/valid-bug` label on their feature PRs in order to be able to merge them after feature freeze. Since most
+individuals can't apply a `bugzilla/valid-bug` label to a PR, this introduces additional obstacles on those PRs. Conversely, anyone
+can apply the docs/qe/px-approved labels, so "FF" teams that need to apply these labels to merge can do so w/o needing to involve
+anyone additional.
+
+Does this mean feature-freeze teams can use the no-FF process to merge code?
+
+No, signing a team up to be a no-FF team includes some basic education on the process and includes ensuring the associated QE+Docs
+participants are aware the team is moving to that model. If you'd like to sign your team up, please speak with Gina Hargan who will
+be happy to help on-board your team.
+
+## vendoring generated manifests into other repositories
+If your repository relies on vendoring and copying CRD manifests (good job!), you'll need have an import line that
+depends on the package that contains the CRD manifests.
+For example, adding
+```go
+import (
+ _ "github.com/openshift/api/operatoringress/v1/zz_generated.crd-manifests"
+)
+```
+to any .go file will work, but some commonly chosen files are `tools/tools.go` or `pkg/dependencymagnet/doc.go`.
+Once added, a `go mod vendor` will pick up the package containing the manifests for you to copy.
+
+## generating CRD schemas
+
+Since Kubernetes 1.16, every CRD created in `apiextensions.k8s.io/v1` is required to have a [structural OpenAPIV3 schema](https://kubernetes.io/blog/2019/06/20/crd-structural-schema/). The schemas provide server-side validation for fields, as well as providing the descriptions for `oc explain`. Moreover, schemas ensure structural consistency of data in etcd. Without it anything can be stored in a resource which can have security implications. As we host many of our CRDs in this repo along with their corresponding Go types we also require them to have schemas. However, the following instructions apply for CRDs that are not hosted here as well.
+
+These schemas are often very long and complex, and should not be written by hand. For OpenShift, we provide Makefile targets in [build-machinery-go](https://github.com/openshift/build-machinery-go/) which generate the schema, built on upstream's [controller-gen](https://github.com/kubernetes-sigs/controller-tools) tool.
+
+If you make a change to a CRD type in this repo, simply calling `make update-codegen-crds` should regenerate all CRDs and update the manifests. If yours is not updated, ensure that the path to its API is included in our [calls to the Makefile targets](https://github.com/openshift/api/blob/release-4.5/Makefile#L17-L29), if this doesn't help try calling `make generate-with-container` for executing the generators in a controlled environment.
+
+To add this generator to another repo:
+1. Vendor `github.com/openshift/build-machinery-go`
+
+2. Update your `Makefile` to include the following:
+```
+include $(addprefix ./vendor/github.com/openshift/build-machinery-go/make/, \
+ targets/openshift/crd-schema-gen.mk \
+)
+
+$(call add-crd-gen,<TARGET_NAME>,<API_DIRECTORY>,<CRD_MANIFESTS>,<MANIFEST_OUTPUT>)
+```
+The parameters for the call are:
+
+1. `TARGET_NAME`: The name of your generated Make target. This can be anything, as long as it does not conflict with another make target. Recommended to be your api name.
+2. `API_DIRECTORY`: The location of your API. For example if your Go types are located under `pkg/apis/myoperator/v1/types.go`, this should be `./pkg/apis/myoperator/v1`.
+3. `CRD_MANIFESTS`: The directory your CRDs are located in. For example, if that is `manifests/my_operator.crd.yaml` then it should be `./manifests`
+4. `MANIFEST_OUTPUT`: This should most likely be the same as `CRD_MANIFESTS`, and is only provided for flexibility to output generated code to a different directory.
+
+You can include as many calls to different APIs as necessary, or if you have multiple APIs under the same directory (eg, `v1` and `v2beta1`) you can use 1 call to the parent directory pointing to your API.
+
+After this, calling `make update-codegen-crds` should generate a new structural OpenAPIV3 schema for your CRDs.
+
+**Notes**
+- This will not generate entire CRDs, only their OpenAPIV3 schemas. If you do not already have a CRD, you will get no output from the generator.
+- Ensure that your API is correctly declared for the generator to pick it up. That means, in your `doc.go`, include the following:
+ 1. `// +groupName=`, this should match the `group` in your CRD `spec`
+ 2. `// +kubebuilder:validation:Optional`, this tells the operator that fields should be optional unless explicitly marked with `// +kubebuilder:validation:Required`
+
+For more information on the API markers to add to your Go types, see the [Kubebuilder book](https://book.kubebuilder.io/reference/markers.html)
+
+### Order of generation
+`make update-codegen-crds` does roughly this:
+
+1. Run the `empty-partial-schema` tool. This creates empty CRD manifests in `zz_generated.featuregated-crd-manifests` for each FeatureGate.
+2. Run the `schemapatch` tool. This fills in the schema for each per-FeatureGate CRD manifest.
+3. Run the `manifest-merge` tool. This combines all the per-FeatureGate CRD manifests and `manual-overrides`
+
+#### empty-partial-schema
+This tool is gengo based and scans all types for a `// +kubebuilder:object:root=true` marker.
+For each type match, the type is navigated and all tags that include a `featureGate`
+(`// +openshift:enable:FeatureGate`, `// +openshift:validation:FeatureGateAwareEnum`, and `// +openshift:validation:FeatureGateAwareXValidation`)
+are tracked.
+For each type, for each FeatureGate, a file CRD manifest is created in `zz_generated.featuregated-crd-manifests`.
+The most common kube-builder tags are re-implemented in this stage to fill in the non-schema portion of the CRD manifests.
+This includes things like metadata, resource, and some custom openshift tags as well.
+
+The generator ignores the schema when doing verify, so it doesn't fail on needing to run `schemapatch`.
+The generator should clean up old FeatureGated manifests when the gate is removed.
+Ungated files are created for resources that are sometimes ungated.
+Annotations are injected to indicate which FeatureGate a manifest is for: this is later read by `schemapatch` and `manifest-merge`.
+
+#### schemapatch
+This tool is kubebuilder based with patches to handle FeatureGated types, members, and validation.
+It reads the injected annotation from `empty-partial-schema` to decide which FeatureGate should be considered enabled when
+creating the schema that needs to be injected.
+It has no knowledge of whether the FeatureGate is enabled or disabled in particular ClusterProfile,FeatureSet tuples.
+It only needs a single pass over all the FeatureGated partial manifests.
+
+If the schema generation isn't doing what you want, `manual-override-crd-manifests` allows partially overlaying bits of the CRD manifest.
+`yamlpatch` is no longer supported.
+The format is just "write the CRD you want and delete the stuff the generator sets properly".
+More specifically, it is the partial manifest that server-side-apply (structured merge diff) would properly merge on top of
+the CRD that is generated otherwise.
+Caveat, you cannot test this with a kube-apiserver because the CRD schema uses atomic lists and we had to patch that
+schema to indicate map lists keyed by version.
+
+#### manifest-merge
+This tool is gengo based and it combines the files in `zz_generated.featuregated-crd-manifests` and `manual-override-crd-manifests`
+on a per ClusterProfile,FeatureSet tuple.
+This tool takes as input all possible ClusterProfiles and all possible FeatureSets.
+It then maps from ClusterProfile,FeatureSet tuple to the set of enabled and disabled FeatureGates.
+Then for each CRD,ClusterProfile,Feature tuple, it merges the pertinent input using structured-merge-diff (SSA) logic
+based on the CRD schema plus a patch to make atomic fields map-lists.
+Pertinence is determined based on
+1. does this manifest have preferred ClusterProfile annotations: if so, honor them; if not, include everywhere.
+2. does this manifest have FeatureGate annotations: if so, match against the enabled set for the ClusterProfile,FeatureSet tuple.
+ Note that CustomNoUpgrade selects everything
+
+Once we have CRD for each ClusterProfile,FeatureSet tuple we choose what to serialize.
+This roughly follows:
+1. if all the CRDs are the same, write a single file and annotate with no FeatureSet and every ClusterProfile. Done.
+2. if all the CRDs are the same across all ClusterProfiles for each FeatureSet, create one file per FeatureSet and
+ annotate with one FeatureSet and all ClusterProfiles. Done.
+3. if all the CRDs are the same across all FeatureSets for one ClusterProfile, create one file and annotate
+ with no FeatureSet and one ClusterProfile. Continue to 4.
+4. for all remaining ClusterProfile,FeatureSet tuples, serialize a file with one FeatureSet and one ClusterProfile.
+
diff --git a/vendor/github.com/openshift/api/apiserver/.codegen.yaml b/vendor/github.com/openshift/api/apiserver/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/apiserver/install.go b/vendor/github.com/openshift/api/apiserver/install.go
new file mode 100644
index 0000000000..c0cf2ac29c
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/install.go
@@ -0,0 +1,22 @@
+package apiserver
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ v1 "github.com/openshift/api/apiserver/v1"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(v1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: "apiserver.openshift.io", Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: "apiserver.openshift.io", Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/apiserver/v1/Makefile b/vendor/github.com/openshift/api/apiserver/v1/Makefile
new file mode 100644
index 0000000000..a2d1fa49be
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="apiserver.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/apiserver/v1/doc.go b/vendor/github.com/openshift/api/apiserver/v1/doc.go
new file mode 100644
index 0000000000..cc6a8aa617
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=apiserver.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/apiserver/v1/register.go b/vendor/github.com/openshift/api/apiserver/v1/register.go
new file mode 100644
index 0000000000..9d6e126e40
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/register.go
@@ -0,0 +1,38 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "apiserver.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &APIRequestCount{},
+ &APIRequestCountList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
new file mode 100644
index 0000000000..eb4918a661
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
@@ -0,0 +1,178 @@
+// Package v1 is an api version in the apiserver.openshift.io group
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+const (
+ // RemovedInReleaseLabel is a label which can be used to select APIRequestCounts based on the release
+ // in which they are removed. The value is equivalent to .status.removedInRelease.
+ RemovedInReleaseLabel = "apirequestcounts.apiserver.openshift.io/removedInRelease"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient:nonNamespaced
+// +openshift:compatibility-gen:level=1
+
+// APIRequestCount tracks requests made to an API. The instance name must
+// be of the form `resource.version.group`, matching the resource.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=apirequestcounts,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/897
+// +openshift:file-pattern=operatorName=kube-apiserver
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +kubebuilder:printcolumn:name=RemovedInRelease,JSONPath=.status.removedInRelease,type=string,description=Release in which an API will be removed.
+// +kubebuilder:printcolumn:name=RequestsInCurrentHour,JSONPath=.status.currentHour.requestCount,type=integer,description=Number of requests in the current hour.
+// +kubebuilder:printcolumn:name=RequestsInLast24h,JSONPath=.status.requestCount,type=integer,description=Number of requests in the last 24h.
+type APIRequestCount struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec defines the characteristics of the resource.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec APIRequestCountSpec `json:"spec"`
+
+ // status contains the observed state of the resource.
+ Status APIRequestCountStatus `json:"status,omitempty"`
+}
+
+type APIRequestCountSpec struct {
+
+ // numberOfUsersToReport is the number of users to include in the report.
+ // If unspecified or zero, the default is ten. This is default is subject to change.
+ // +kubebuilder:default:=10
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=100
+ // +optional
+ NumberOfUsersToReport int64 `json:"numberOfUsersToReport"`
+}
+
+// +k8s:deepcopy-gen=true
+type APIRequestCountStatus struct {
+
+ // conditions contains details of the current status of this API Resource.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // removedInRelease is when the API will be removed.
+ // +kubebuilder:validation:MinLength=0
+ // +kubebuilder:validation:Pattern=^[0-9][0-9]*\.[0-9][0-9]*$
+ // +kubebuilder:validation:MaxLength=64
+ // +optional
+ RemovedInRelease string `json:"removedInRelease,omitempty"`
+
+ // requestCount is a sum of all requestCounts across all current hours, nodes, and users.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ RequestCount int64 `json:"requestCount"`
+
+ // currentHour contains request history for the current hour. This is porcelain to make the API
+ // easier to read by humans seeing if they addressed a problem. This field is reset on the hour.
+ // +optional
+ CurrentHour PerResourceAPIRequestLog `json:"currentHour"`
+
+ // last24h contains request history for the last 24 hours, indexed by the hour, so
+ // 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour
+ // is updated live and then duplicated into the requestsLastHour field.
+ // +kubebuilder:validation:MaxItems=24
+ // +optional
+ Last24h []PerResourceAPIRequestLog `json:"last24h"`
+}
+
+// PerResourceAPIRequestLog logs request for various nodes.
+type PerResourceAPIRequestLog struct {
+
+ // byNode contains logs of requests per node.
+ // +kubebuilder:validation:MaxItems=512
+ // +optional
+ ByNode []PerNodeAPIRequestLog `json:"byNode"`
+
+ // requestCount is a sum of all requestCounts across nodes.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ RequestCount int64 `json:"requestCount"`
+}
+
+// PerNodeAPIRequestLog contains logs of requests to a certain node.
+type PerNodeAPIRequestLog struct {
+
+ // nodeName where the request are being handled.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=512
+ // +required
+ NodeName string `json:"nodeName"`
+
+ // requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ RequestCount int64 `json:"requestCount"`
+
+ // byUser contains request details by top .spec.numberOfUsersToReport users.
+ // Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis,
+ // the list might be imprecise.
+ // In addition, some system users may be explicitly included in the list.
+ // +kubebuilder:validation:MaxItems=500
+ ByUser []PerUserAPIRequestCount `json:"byUser"`
+}
+
+// PerUserAPIRequestCount contains logs of a user's requests.
+type PerUserAPIRequestCount struct {
+
+ // userName that made the request.
+ // +kubebuilder:validation:MaxLength=512
+ UserName string `json:"username"`
+
+ // userAgent that made the request.
+ // The same user often has multiple binaries which connect (pods with many containers). The different binaries
+ // will have different userAgents, but the same user. In addition, we have userAgents with version information
+ // embedded and the userName isn't likely to change.
+ // +kubebuilder:validation:MaxLength=1024
+ UserAgent string `json:"userAgent"`
+
+ // requestCount of requests by the user across all verbs.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ RequestCount int64 `json:"requestCount"`
+
+ // byVerb details by verb.
+ // +kubebuilder:validation:MaxItems=10
+ ByVerb []PerVerbAPIRequestCount `json:"byVerb"`
+}
+
+// PerVerbAPIRequestCount requestCounts requests by API request verb.
+type PerVerbAPIRequestCount struct {
+
+ // verb of API request (get, list, create, etc...)
+ // +kubebuilder:validation:MaxLength=20
+ // +required
+ Verb string `json:"verb"`
+
+ // requestCount of requests for verb.
+ // +kubebuilder:validation:Minimum=0
+ // +required
+ RequestCount int64 `json:"requestCount"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+
+// APIRequestCountList is a list of APIRequestCount resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+type APIRequestCountList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []APIRequestCount `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..79be371535
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go
@@ -0,0 +1,202 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIRequestCount) DeepCopyInto(out *APIRequestCount) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCount.
+func (in *APIRequestCount) DeepCopy() *APIRequestCount {
+ if in == nil {
+ return nil
+ }
+ out := new(APIRequestCount)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIRequestCount) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIRequestCountList) DeepCopyInto(out *APIRequestCountList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIRequestCount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountList.
+func (in *APIRequestCountList) DeepCopy() *APIRequestCountList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIRequestCountList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIRequestCountList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIRequestCountSpec) DeepCopyInto(out *APIRequestCountSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountSpec.
+func (in *APIRequestCountSpec) DeepCopy() *APIRequestCountSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIRequestCountSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIRequestCountStatus) DeepCopyInto(out *APIRequestCountStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.CurrentHour.DeepCopyInto(&out.CurrentHour)
+ if in.Last24h != nil {
+ in, out := &in.Last24h, &out.Last24h
+ *out = make([]PerResourceAPIRequestLog, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountStatus.
+func (in *APIRequestCountStatus) DeepCopy() *APIRequestCountStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIRequestCountStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerNodeAPIRequestLog) DeepCopyInto(out *PerNodeAPIRequestLog) {
+ *out = *in
+ if in.ByUser != nil {
+ in, out := &in.ByUser, &out.ByUser
+ *out = make([]PerUserAPIRequestCount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerNodeAPIRequestLog.
+func (in *PerNodeAPIRequestLog) DeepCopy() *PerNodeAPIRequestLog {
+ if in == nil {
+ return nil
+ }
+ out := new(PerNodeAPIRequestLog)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerResourceAPIRequestLog) DeepCopyInto(out *PerResourceAPIRequestLog) {
+ *out = *in
+ if in.ByNode != nil {
+ in, out := &in.ByNode, &out.ByNode
+ *out = make([]PerNodeAPIRequestLog, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerResourceAPIRequestLog.
+func (in *PerResourceAPIRequestLog) DeepCopy() *PerResourceAPIRequestLog {
+ if in == nil {
+ return nil
+ }
+ out := new(PerResourceAPIRequestLog)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerUserAPIRequestCount) DeepCopyInto(out *PerUserAPIRequestCount) {
+ *out = *in
+ if in.ByVerb != nil {
+ in, out := &in.ByVerb, &out.ByVerb
+ *out = make([]PerVerbAPIRequestCount, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerUserAPIRequestCount.
+func (in *PerUserAPIRequestCount) DeepCopy() *PerUserAPIRequestCount {
+ if in == nil {
+ return nil
+ }
+ out := new(PerUserAPIRequestCount)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerVerbAPIRequestCount) DeepCopyInto(out *PerVerbAPIRequestCount) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerVerbAPIRequestCount.
+func (in *PerVerbAPIRequestCount) DeepCopy() *PerVerbAPIRequestCount {
+ if in == nil {
+ return nil
+ }
+ out := new(PerVerbAPIRequestCount)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..f5ff911a2f
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,34 @@
+apirequestcounts.apiserver.openshift.io:
+ Annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/897
+ CRDName: apirequestcounts.apiserver.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: kube-apiserver
+ FilenameOperatorOrdering: ""
+ FilenameRunLevel: ""
+ GroupName: apiserver.openshift.io
+ HasStatus: true
+ KindName: APIRequestCount
+ Labels: {}
+ PluralName: apirequestcounts
+ PrinterColumns:
+ - description: Release in which an API will be removed.
+ jsonPath: .status.removedInRelease
+ name: RemovedInRelease
+ type: string
+ - description: Number of requests in the current hour.
+ jsonPath: .status.currentHour.requestCount
+ name: RequestsInCurrentHour
+ type: integer
+ - description: Number of requests in the last 24h.
+ jsonPath: .status.requestCount
+ name: RequestsInLast24h
+ type: integer
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..27d74b6c19
--- /dev/null
+++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,97 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_APIRequestCount = map[string]string{
+ "": "APIRequestCount tracks requests made to an API. The instance name must be of the form `resource.version.group`, matching the resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec defines the characteristics of the resource.",
+ "status": "status contains the observed state of the resource.",
+}
+
+func (APIRequestCount) SwaggerDoc() map[string]string {
+ return map_APIRequestCount
+}
+
+var map_APIRequestCountList = map[string]string{
+ "": "APIRequestCountList is a list of APIRequestCount resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (APIRequestCountList) SwaggerDoc() map[string]string {
+ return map_APIRequestCountList
+}
+
+var map_APIRequestCountSpec = map[string]string{
+ "numberOfUsersToReport": "numberOfUsersToReport is the number of users to include in the report. If unspecified or zero, the default is ten. This is default is subject to change.",
+}
+
+func (APIRequestCountSpec) SwaggerDoc() map[string]string {
+ return map_APIRequestCountSpec
+}
+
+var map_APIRequestCountStatus = map[string]string{
+ "conditions": "conditions contains details of the current status of this API Resource.",
+ "removedInRelease": "removedInRelease is when the API will be removed.",
+ "requestCount": "requestCount is a sum of all requestCounts across all current hours, nodes, and users.",
+ "currentHour": "currentHour contains request history for the current hour. This is porcelain to make the API easier to read by humans seeing if they addressed a problem. This field is reset on the hour.",
+ "last24h": "last24h contains request history for the last 24 hours, indexed by the hour, so 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour is updated live and then duplicated into the requestsLastHour field.",
+}
+
+func (APIRequestCountStatus) SwaggerDoc() map[string]string {
+ return map_APIRequestCountStatus
+}
+
+var map_PerNodeAPIRequestLog = map[string]string{
+ "": "PerNodeAPIRequestLog contains logs of requests to a certain node.",
+ "nodeName": "nodeName where the request are being handled.",
+ "requestCount": "requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users.",
+ "byUser": "byUser contains request details by top .spec.numberOfUsersToReport users. Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis, the list might be imprecise. In addition, some system users may be explicitly included in the list.",
+}
+
+func (PerNodeAPIRequestLog) SwaggerDoc() map[string]string {
+ return map_PerNodeAPIRequestLog
+}
+
+var map_PerResourceAPIRequestLog = map[string]string{
+ "": "PerResourceAPIRequestLog logs request for various nodes.",
+ "byNode": "byNode contains logs of requests per node.",
+ "requestCount": "requestCount is a sum of all requestCounts across nodes.",
+}
+
+func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string {
+ return map_PerResourceAPIRequestLog
+}
+
+var map_PerUserAPIRequestCount = map[string]string{
+ "": "PerUserAPIRequestCount contains logs of a user's requests.",
+ "username": "userName that made the request.",
+ "userAgent": "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change.",
+ "requestCount": "requestCount of requests by the user across all verbs.",
+ "byVerb": "byVerb details by verb.",
+}
+
+func (PerUserAPIRequestCount) SwaggerDoc() map[string]string {
+ return map_PerUserAPIRequestCount
+}
+
+var map_PerVerbAPIRequestCount = map[string]string{
+ "": "PerVerbAPIRequestCount requestCounts requests by API request verb.",
+ "verb": "verb of API request (get, list, create, etc...)",
+ "requestCount": "requestCount of requests for verb.",
+}
+
+func (PerVerbAPIRequestCount) SwaggerDoc() map[string]string {
+ return map_PerVerbAPIRequestCount
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/apps/OWNERS b/vendor/github.com/openshift/api/apps/OWNERS
new file mode 100644
index 0000000000..d8d669b910
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/OWNERS
@@ -0,0 +1,3 @@
+reviewers:
+ - mfojtik
+ - soltysh
diff --git a/vendor/github.com/openshift/api/apps/install.go b/vendor/github.com/openshift/api/apps/install.go
new file mode 100644
index 0000000000..80f7ba2b2c
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/install.go
@@ -0,0 +1,26 @@
+package apps
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ appsv1 "github.com/openshift/api/apps/v1"
+)
+
+const (
+ GroupName = "apps.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(appsv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go
new file mode 100644
index 0000000000..212578bccf
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/consts.go
@@ -0,0 +1,108 @@
+package v1
+
+const (
+ // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
+ // Used for specifying the reason for cancellation or failure of a deployment
+ // This is on replication controller set by deployer controller.
+ DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
+
+ // DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
+ // annotation value is the name of the deployer Pod which will act upon the ReplicationController
+ // to implement the deployment behavior.
+ // This is set on replication controller by deployer controller.
+ DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
+
+ // DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
+ // DeploymentConfig on which the deployment is based.
+ // This is set on replication controller pod template by deployer controller.
+ DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
+
+ // DeploymentCancelledAnnotation indicates that the deployment has been cancelled
+ // The annotation value does not matter and its mere presence indicates cancellation.
+ // This is set on replication controller by deployment config controller or oc rollout cancel command.
+ DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
+
+ // DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
+ // DeploymentConfig on which a given deployment is based.
+ // This is set on replication controller by deployer controller.
+ DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
+
+ // DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
+ // annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
+ // the deployment.
+ // This is set on replication controller pod template by deployment config controller.
+ DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
+
+ // DeployerPodForDeploymentLabel is a label which groups pods related to a
+ // deployment. The value is a deployment name. The deployer pod and hook pods
+ // created by the internal strategies will have this label. Custom
+ // strategies can apply this label to any pods they create, enabling
+ // platform-provided cancellation and garbage collection support.
+ // This is set on deployer pod by deployer controller.
+ DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
+
+ // DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of
+ // a deployment.
+ // This is set on replication controller by deployer controller.
+ DeploymentStatusAnnotation = "openshift.io/deployment.phase"
+)
+
+type DeploymentConditionReason string
+
+var (
+ // ReplicationControllerUpdatedReason is added in a deployment config when one of its replication
+ // controllers is updated as part of the rollout process.
+ ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated"
+
+ // ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication
+ // controller.
+ ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError"
+
+ // ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication
+ // controller.
+ NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated"
+
+ // NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made
+ // available ie. the number of new pods that have passed readiness checks and run for at least
+ // minReadySeconds is at least the minimum available pods that need to run for the deployment config.
+ NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable"
+
+ // ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show
+ // any progress within the given deadline (progressDeadlineSeconds).
+ ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded"
+
+ // DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be
+ // estimated once a deployment config is paused.
+ DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused"
+
+ // DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally
+ // deployment configs that paused amidst a rollout.
+ DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed"
+
+ // RolloutCancelledReason is added in a deployment config when its newest rollout was
+ // interrupted by cancellation.
+ RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled"
+)
+
+// DeploymentStatus describes the possible states a deployment can be in.
+type DeploymentStatus string
+
+var (
+
+ // DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
+ DeploymentStatusNew DeploymentStatus = "New"
+
+ // DeploymentStatusPending means the deployment been handed over to a deployment strategy,
+ // but the strategy has not yet declared the deployment to be running.
+ DeploymentStatusPending DeploymentStatus = "Pending"
+
+ // DeploymentStatusRunning means the deployment strategy has reported the deployment as
+ // being in-progress.
+ DeploymentStatusRunning DeploymentStatus = "Running"
+
+ // DeploymentStatusComplete means the deployment finished without an error.
+ DeploymentStatusComplete DeploymentStatus = "Complete"
+
+ // DeploymentStatusFailed means the deployment finished with an error.
+ DeploymentStatusFailed DeploymentStatus = "Failed"
+)
diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
new file mode 100644
index 0000000000..31969786c4
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
@@ -0,0 +1,38 @@
+package v1
+
+// This file contains consts that are not shared between components and set just internally.
+// They will likely be removed in (near) future.
+
+const (
+ // DeployerPodCreatedAtAnnotation is an annotation on a deployment that
+ // records the time in RFC3339 format of when the deployer pod for this particular
+ // deployment was created.
+ // This is set by deployer controller, but not consumed by any command or internally.
+ // DEPRECATED: will be removed soon
+ DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at"
+
+ // DeployerPodStartedAtAnnotation is an annotation on a deployment that
+ // records the time in RFC3339 format of when the deployer pod for this particular
+ // deployment was started.
+ // This is set by deployer controller, but not consumed by any command or internally.
+ // DEPRECATED: will be removed soon
+ DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at"
+
+ // DeployerPodCompletedAtAnnotation is an annotation on deployment that records
+ // the time in RFC3339 format of when the deployer pod finished.
+ // This is set by deployer controller, but not consumed by any command or internally.
+ // DEPRECATED: will be removed soon
+ DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at"
+
+ // DesiredReplicasAnnotation represents the desired number of replicas for a
+ // new deployment.
+ // This is set by deployer controller, but not consumed by any command or internally.
+ // DEPRECATED: will be removed soon
+ DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas"
+
+ // DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
+ // of the deployment (a ReplicationController) on which the deployer Pod acts.
+ // This is set by deployer controller and consumed internally and in oc adm top command.
+ // DEPRECATED: will be removed soon
+ DeploymentAnnotation = "openshift.io/deployment.name"
+)
diff --git a/vendor/github.com/openshift/api/apps/v1/doc.go b/vendor/github.com/openshift/api/apps/v1/doc.go
new file mode 100644
index 0000000000..f0fb3f59a2
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/doc.go
@@ -0,0 +1,9 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+// +k8s:prerelease-lifecycle-gen=true
+
+// +groupName=apps.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go
new file mode 100644
index 0000000000..18ed8b9310
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go
@@ -0,0 +1,7461 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/apps/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} }
+func (*CustomDeploymentStrategyParams) ProtoMessage() {}
+func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{0}
+}
+func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src)
+}
+func (m *CustomDeploymentStrategyParams) XXX_Size() int {
+ return m.Size()
+}
+func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() {
+ xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo
+
+func (m *DeploymentCause) Reset() { *m = DeploymentCause{} }
+func (*DeploymentCause) ProtoMessage() {}
+func (*DeploymentCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{1}
+}
+func (m *DeploymentCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentCause.Merge(m, src)
+}
+func (m *DeploymentCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo
+
+func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} }
+func (*DeploymentCauseImageTrigger) ProtoMessage() {}
+func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{2}
+}
+func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src)
+}
+func (m *DeploymentCauseImageTrigger) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo
+
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
+func (*DeploymentCondition) ProtoMessage() {}
+func (*DeploymentCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{3}
+}
+func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentCondition.Merge(m, src)
+}
+func (m *DeploymentCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
+
+func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} }
+func (*DeploymentConfig) ProtoMessage() {}
+func (*DeploymentConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{4}
+}
+func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfig.Merge(m, src)
+}
+func (m *DeploymentConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo
+
+func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} }
+func (*DeploymentConfigList) ProtoMessage() {}
+func (*DeploymentConfigList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{5}
+}
+func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfigList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfigList.Merge(m, src)
+}
+func (m *DeploymentConfigList) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfigList) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo
+
+func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} }
+func (*DeploymentConfigRollback) ProtoMessage() {}
+func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{6}
+}
+func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfigRollback.Merge(m, src)
+}
+func (m *DeploymentConfigRollback) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfigRollback) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo
+
+func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} }
+func (*DeploymentConfigRollbackSpec) ProtoMessage() {}
+func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{7}
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src)
+}
+func (m *DeploymentConfigRollbackSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo
+
+func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} }
+func (*DeploymentConfigSpec) ProtoMessage() {}
+func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{8}
+}
+func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfigSpec.Merge(m, src)
+}
+func (m *DeploymentConfigSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfigSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo
+
+func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} }
+func (*DeploymentConfigStatus) ProtoMessage() {}
+func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{9}
+}
+func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentConfigStatus.Merge(m, src)
+}
+func (m *DeploymentConfigStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentConfigStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo
+
+func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} }
+func (*DeploymentDetails) ProtoMessage() {}
+func (*DeploymentDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{10}
+}
+func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentDetails.Merge(m, src)
+}
+func (m *DeploymentDetails) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo
+
+func (m *DeploymentLog) Reset() { *m = DeploymentLog{} }
+func (*DeploymentLog) ProtoMessage() {}
+func (*DeploymentLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{11}
+}
+func (m *DeploymentLog) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentLog.Merge(m, src)
+}
+func (m *DeploymentLog) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo
+
+func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} }
+func (*DeploymentLogOptions) ProtoMessage() {}
+func (*DeploymentLogOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{12}
+}
+func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentLogOptions.Merge(m, src)
+}
+func (m *DeploymentLogOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentLogOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo
+
+func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} }
+func (*DeploymentRequest) ProtoMessage() {}
+func (*DeploymentRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{13}
+}
+func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentRequest.Merge(m, src)
+}
+func (m *DeploymentRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo
+
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
+func (*DeploymentStrategy) ProtoMessage() {}
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{14}
+}
+func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentStrategy.Merge(m, src)
+}
+func (m *DeploymentStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
+
+func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} }
+func (*DeploymentTriggerImageChangeParams) ProtoMessage() {}
+func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{15}
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src)
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo
+
+func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} }
+func (*DeploymentTriggerPolicies) ProtoMessage() {}
+func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{16}
+}
+func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src)
+}
+func (m *DeploymentTriggerPolicies) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo
+
+func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} }
+func (*DeploymentTriggerPolicy) ProtoMessage() {}
+func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{17}
+}
+func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src)
+}
+func (m *DeploymentTriggerPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo
+
+func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} }
+func (*ExecNewPodHook) ProtoMessage() {}
+func (*ExecNewPodHook) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{18}
+}
+func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExecNewPodHook) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExecNewPodHook.Merge(m, src)
+}
+func (m *ExecNewPodHook) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExecNewPodHook) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo
+
+func (m *LifecycleHook) Reset() { *m = LifecycleHook{} }
+func (*LifecycleHook) ProtoMessage() {}
+func (*LifecycleHook) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{19}
+}
+func (m *LifecycleHook) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LifecycleHook) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LifecycleHook.Merge(m, src)
+}
+func (m *LifecycleHook) XXX_Size() int {
+ return m.Size()
+}
+func (m *LifecycleHook) XXX_DiscardUnknown() {
+ xxx_messageInfo_LifecycleHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo
+
+func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} }
+func (*RecreateDeploymentStrategyParams) ProtoMessage() {}
+func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{20}
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src)
+}
+func (m *RecreateDeploymentStrategyParams) XXX_Size() int {
+ return m.Size()
+}
+func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() {
+ xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo
+
+func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} }
+func (*RollingDeploymentStrategyParams) ProtoMessage() {}
+func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{21}
+}
+func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src)
+}
+func (m *RollingDeploymentStrategyParams) XXX_Size() int {
+ return m.Size()
+}
+func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo
+
+func (m *TagImageHook) Reset() { *m = TagImageHook{} }
+func (*TagImageHook) ProtoMessage() {}
+func (*TagImageHook) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8f1b1bee37da74c1, []int{22}
+}
+func (m *TagImageHook) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagImageHook) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagImageHook.Merge(m, src)
+}
+func (m *TagImageHook) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagImageHook) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagImageHook.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagImageHook proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams")
+ proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause")
+ proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger")
+ proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition")
+ proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig")
+ proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList")
+ proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry")
+ proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec")
+ proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry")
+ proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus")
+ proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails")
+ proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog")
+ proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions")
+ proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest")
+ proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry")
+ proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams")
+ proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies")
+ proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy")
+ proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook")
+ proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook")
+ proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams")
+ proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams")
+ proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1)
+}
+
+var fileDescriptor_8f1b1bee37da74c1 = []byte{
+ // 2523 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1c, 0x49,
+ 0x15, 0x77, 0x7b, 0x66, 0xec, 0x99, 0xe7, 0xaf, 0xb8, 0x9c, 0x8f, 0x59, 0x2f, 0xf2, 0x58, 0xb3,
+ 0xda, 0xc5, 0xc0, 0x32, 0xb3, 0xf1, 0x86, 0xd5, 0x26, 0xd1, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1,
+ 0x38, 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd,
+ 0x35, 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20,
+ 0x0e, 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3,
+ 0x4f, 0xc8, 0x09, 0xd5, 0x47, 0x7f, 0xcd, 0x47, 0xec, 0x71, 0x72, 0x73, 0xbf, 0x8f, 0xdf, 0x7b,
+ 0xf5, 0xea, 0xbd, 0x57, 0xaf, 0x6a, 0x0c, 0xef, 0x34, 0x99, 0x38, 0x68, 0xef, 0x55, 0x6c, 0xee,
+ 0x56, 0x79, 0x8b, 0x7a, 0xc1, 0x01, 0xdb, 0x17, 0x55, 0xd2, 0x62, 0x55, 0xd2, 0x6a, 0x05, 0xd5,
+ 0xce, 0xe5, 0x6a, 0x93, 0x7a, 0xd4, 0x27, 0x82, 0x36, 0x2a, 0x2d, 0x9f, 0x0b, 0x8e, 0x96, 0x63,
+ 0x8d, 0x4a, 0xa4, 0x51, 0x21, 0x2d, 0x56, 0x91, 0x1a, 0x95, 0xce, 0xe5, 0xc5, 0x6f, 0x26, 0x30,
+ 0x9b, 0xbc, 0xc9, 0xab, 0x4a, 0x71, 0xaf, 0xbd, 0xaf, 0xbe, 0xd4, 0x87, 0xfa, 0x4b, 0x03, 0x2e,
+ 0x96, 0x0f, 0xdf, 0x0f, 0x2a, 0x8c, 0x2b, 0xa3, 0x36, 0xf7, 0xe9, 0x00, 0xa3, 0x8b, 0x57, 0x62,
+ 0x19, 0x97, 0xd8, 0x07, 0xcc, 0xa3, 0x7e, 0xb7, 0xda, 0x3a, 0x6c, 0x4a, 0x42, 0x50, 0x75, 0xa9,
+ 0x20, 0x83, 0xb4, 0xde, 0x1b, 0xa6, 0xe5, 0xb7, 0x3d, 0xc1, 0x5c, 0x5a, 0x0d, 0xec, 0x03, 0xea,
+ 0x92, 0x3e, 0xbd, 0x77, 0x87, 0xe9, 0xb5, 0x05, 0x73, 0xaa, 0xcc, 0x13, 0x81, 0xf0, 0x7b, 0x95,
+ 0xca, 0x7f, 0xb6, 0x60, 0x69, 0xbd, 0x1d, 0x08, 0xee, 0xde, 0xa0, 0x2d, 0x87, 0x77, 0x5d, 0xea,
+ 0x89, 0x1d, 0x21, 0x25, 0x9a, 0xdd, 0x6d, 0xe2, 0x13, 0x37, 0x40, 0x6f, 0x40, 0x8e, 0xb9, 0xa4,
+ 0x49, 0x8b, 0xd6, 0xb2, 0xb5, 0x52, 0xa8, 0xcd, 0x3c, 0x3d, 0x2a, 0x8d, 0x1d, 0x1f, 0x95, 0x72,
+ 0x9b, 0x92, 0x88, 0x35, 0x0f, 0x7d, 0x17, 0xa6, 0xa8, 0xd7, 0x61, 0x3e, 0xf7, 0x24, 0x42, 0x71,
+ 0x7c, 0x39, 0xb3, 0x32, 0xb5, 0xba, 0x58, 0xd1, 0x2e, 0xa9, 0x38, 0xcb, 0x20, 0x55, 0x3a, 0x97,
+ 0x2b, 0x1b, 0x5e, 0xe7, 0x1e, 0xf1, 0x6b, 0x0b, 0x06, 0x66, 0x6a, 0x23, 0x56, 0xc3, 0x49, 0x0c,
+ 0xf4, 0x26, 0x4c, 0xda, 0xdc, 0x75, 0x89, 0xd7, 0x28, 0x66, 0x96, 0x33, 0x2b, 0x85, 0xda, 0xd4,
+ 0xf1, 0x51, 0x69, 0x72, 0x5d, 0x93, 0x70, 0xc8, 0x2b, 0xff, 0xc5, 0x82, 0xb9, 0xd8, 0xf7, 0x75,
+ 0xd2, 0x0e, 0x28, 0xba, 0x0a, 0x59, 0xd1, 0x6d, 0x85, 0x1e, 0xbf, 0x69, 0x4c, 0x65, 0x77, 0xbb,
+ 0x2d, 0xfa, 0xfc, 0xa8, 0x74, 0x21, 0x16, 0xdf, 0xf5, 0x59, 0xb3, 0x49, 0x7d, 0xc9, 0xc0, 0x4a,
+ 0x05, 0x05, 0x30, 0xad, 0x56, 0x64, 0x38, 0xc5, 0xf1, 0x65, 0x6b, 0x65, 0x6a, 0xf5, 0x83, 0xca,
+ 0x49, 0xf9, 0x53, 0xe9, 0xf1, 0x61, 0x33, 0x01, 0x52, 0x3b, 0x77, 0x7c, 0x54, 0x9a, 0x4e, 0x52,
+ 0x70, 0xca, 0x48, 0xb9, 0x01, 0xaf, 0xbf, 0x40, 0x1d, 0x6d, 0x40, 0x76, 0xdf, 0xe7, 0xae, 0x5a,
+ 0xce, 0xd4, 0xea, 0x1b, 0x83, 0xa2, 0x7a, 0x67, 0xef, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, 0xa9, 0x4f,
+ 0x3d, 0x9b, 0xd6, 0xa6, 0xc3, 0x35, 0xdf, 0xf4, 0xb9, 0x8b, 0x95, 0x7a, 0xf9, 0x5f, 0x19, 0x58,
+ 0x48, 0x98, 0xe1, 0x5e, 0x83, 0x09, 0xc6, 0x3d, 0x74, 0x3d, 0x15, 0xad, 0xaf, 0xf6, 0x44, 0xeb,
+ 0xd2, 0x00, 0x95, 0x44, 0xbc, 0xea, 0x30, 0x11, 0x08, 0x22, 0xda, 0x81, 0x8a, 0x54, 0xa1, 0x76,
+ 0xc5, 0xa8, 0x4f, 0xec, 0x28, 0xea, 0xf3, 0xa3, 0xd2, 0x80, 0x4a, 0xa9, 0x44, 0x48, 0x5a, 0x0a,
+ 0x1b, 0x0c, 0xf4, 0x09, 0xcc, 0x3a, 0x24, 0x10, 0x77, 0x5b, 0x0d, 0x22, 0xe8, 0x2e, 0x73, 0x69,
+ 0x71, 0x42, 0xad, 0xf9, 0xeb, 0x89, 0x35, 0x47, 0xc9, 0x5d, 0x69, 0x1d, 0x36, 0x25, 0x21, 0xa8,
+ 0xc8, 0x52, 0x92, 0x51, 0x90, 0x1a, 0xb5, 0x8b, 0xc6, 0x83, 0xd9, 0x7a, 0x0a, 0x09, 0xf7, 0x20,
+ 0xa3, 0x0e, 0x20, 0x49, 0xd9, 0xf5, 0x89, 0x17, 0xe8, 0x55, 0x49, 0x7b, 0x99, 0x91, 0xed, 0x2d,
+ 0x1a, 0x7b, 0xa8, 0xde, 0x87, 0x86, 0x07, 0x58, 0x40, 0x6f, 0xc1, 0x84, 0x4f, 0x49, 0xc0, 0xbd,
+ 0x62, 0x56, 0x45, 0x6c, 0x36, 0x8c, 0x18, 0x56, 0x54, 0x6c, 0xb8, 0xe8, 0x6b, 0x30, 0xe9, 0xd2,
+ 0x20, 0x90, 0x95, 0x97, 0x53, 0x82, 0x73, 0x46, 0x70, 0x72, 0x4b, 0x93, 0x71, 0xc8, 0x2f, 0xff,
+ 0x71, 0x1c, 0xce, 0xa5, 0xb6, 0x69, 0x9f, 0x35, 0xd1, 0x23, 0xc8, 0x4b, 0x3f, 0x1b, 0x44, 0x10,
+ 0x93, 0x39, 0xef, 0x9c, 0x6e, 0x55, 0x3a, 0x97, 0xb6, 0xa8, 0x20, 0x35, 0x64, 0x4c, 0x42, 0x4c,
+ 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x16, 0xb5, 0x4d, 0x8d, 0xbc, 0x37, 0x52, 0x8d, 0x28,
+ 0x1f, 0x77, 0x5a, 0xd4, 0x8e, 0x53, 0x55, 0x7e, 0x61, 0x85, 0x88, 0x1e, 0x45, 0x59, 0xa5, 0xf7,
+ 0xe3, 0xfd, 0x33, 0x60, 0x2b, 0xfd, 0x38, 0xba, 0xe9, 0x4c, 0x2b, 0xff, 0xdd, 0x82, 0xf3, 0xbd,
+ 0x2a, 0x75, 0x16, 0x08, 0xf4, 0xfd, 0xbe, 0xb0, 0x55, 0x4e, 0x17, 0x36, 0xa9, 0xad, 0x82, 0x76,
+ 0xce, 0x98, 0xcc, 0x87, 0x94, 0x44, 0xc8, 0xee, 0x43, 0x8e, 0x09, 0xea, 0x06, 0xa6, 0x43, 0xae,
+ 0x8e, 0xbe, 0xae, 0x44, 0x03, 0x96, 0x40, 0x58, 0xe3, 0x95, 0x7f, 0x9e, 0x81, 0x62, 0xaf, 0x28,
+ 0xe6, 0x8e, 0xb3, 0x47, 0xec, 0x43, 0xb4, 0x0c, 0x59, 0x8f, 0xb8, 0x61, 0x85, 0x47, 0x01, 0xbf,
+ 0x4d, 0x5c, 0x8a, 0x15, 0x07, 0xfd, 0xc6, 0x02, 0xd4, 0x56, 0xb5, 0xd1, 0x58, 0xf3, 0x3c, 0x2e,
+ 0x88, 0x4c, 0xd7, 0xd0, 0x4b, 0x3c, 0xba, 0x97, 0xa1, 0xe9, 0xca, 0xdd, 0x3e, 0xd0, 0x0d, 0x4f,
+ 0xf8, 0xdd, 0xb8, 0x6a, 0xfa, 0x05, 0xf0, 0x00, 0x4f, 0xd0, 0x23, 0x93, 0x6b, 0x3a, 0x1f, 0x3e,
+ 0x3c, 0xbb, 0x47, 0xc3, 0x72, 0x6e, 0x71, 0x03, 0x2e, 0x0d, 0x71, 0x16, 0x9d, 0x83, 0xcc, 0x21,
+ 0xed, 0xea, 0xf0, 0x61, 0xf9, 0x27, 0x3a, 0x0f, 0xb9, 0x0e, 0x71, 0xda, 0x54, 0x77, 0x3d, 0xac,
+ 0x3f, 0xae, 0x8d, 0xbf, 0x6f, 0x95, 0xff, 0x94, 0x81, 0xaf, 0xbc, 0xc8, 0xf6, 0x2b, 0xea, 0xe6,
+ 0xe8, 0x6d, 0xc8, 0xfb, 0xb4, 0xc3, 0x02, 0xc6, 0x3d, 0xe5, 0x44, 0x26, 0xce, 0x3b, 0x6c, 0xe8,
+ 0x38, 0x92, 0x40, 0x6b, 0x30, 0xc7, 0x3c, 0xdb, 0x69, 0x37, 0xc2, 0x43, 0x45, 0x57, 0x56, 0xbe,
+ 0x76, 0xc9, 0x28, 0xcd, 0x6d, 0xa6, 0xd9, 0xb8, 0x57, 0x3e, 0x09, 0x41, 0xdd, 0x96, 0x43, 0x04,
+ 0x55, 0x0d, 0x6c, 0x00, 0x84, 0x61, 0xe3, 0x5e, 0x79, 0x74, 0x0f, 0x2e, 0x1a, 0x12, 0xa6, 0x2d,
+ 0x87, 0xd9, 0x2a, 0xc6, 0xb2, 0x42, 0x54, 0x87, 0xcb, 0xd7, 0x96, 0x0c, 0xd2, 0xc5, 0xcd, 0x81,
+ 0x52, 0x78, 0x88, 0x76, 0xc2, 0xb5, 0x70, 0x76, 0x51, 0xe7, 0x46, 0xbf, 0x6b, 0x21, 0x1b, 0xf7,
+ 0xca, 0x97, 0xff, 0x97, 0xeb, 0xef, 0x07, 0x6a, 0xbb, 0xf6, 0x20, 0x1f, 0x84, 0xa0, 0x7a, 0xcb,
+ 0xae, 0x8c, 0x92, 0x7c, 0xa1, 0x81, 0x78, 0x77, 0x22, 0x1f, 0x22, 0x5c, 0xe9, 0xbf, 0xcb, 0x3c,
+ 0x4c, 0x49, 0xa3, 0xbb, 0x43, 0x6d, 0xee, 0x35, 0x82, 0x62, 0x61, 0xd9, 0x5a, 0xc9, 0xc5, 0xfe,
+ 0x6f, 0xa5, 0xd9, 0xb8, 0x57, 0x1e, 0x51, 0xc8, 0x8b, 0x70, 0x67, 0x75, 0x3f, 0xbe, 0x3e, 0x8a,
+ 0x9b, 0x66, 0x97, 0xb7, 0xb9, 0xc3, 0x6c, 0x46, 0x83, 0xda, 0xb4, 0xf4, 0x34, 0xca, 0x85, 0x08,
+ 0x5a, 0x67, 0x9d, 0x0a, 0xbe, 0x4e, 0xa0, 0x5c, 0x32, 0xeb, 0x34, 0x1d, 0x47, 0x12, 0xa8, 0x0e,
+ 0xe7, 0xc3, 0x0c, 0xfc, 0x98, 0x05, 0x82, 0xfb, 0xdd, 0x3a, 0x73, 0x99, 0x50, 0x79, 0x93, 0xab,
+ 0x15, 0x8f, 0x8f, 0x4a, 0xe7, 0xf1, 0x00, 0x3e, 0x1e, 0xa8, 0x25, 0xbb, 0x98, 0xa0, 0x81, 0x30,
+ 0xb9, 0x12, 0xd5, 0xc4, 0x2e, 0x0d, 0x04, 0x56, 0x1c, 0x79, 0xb4, 0xb6, 0xe4, 0xf4, 0xd4, 0x30,
+ 0xdb, 0x1f, 0x35, 0xff, 0x6d, 0x45, 0xc5, 0x86, 0x8b, 0x7c, 0xc8, 0x07, 0xd4, 0xa1, 0xb6, 0xe0,
+ 0x7e, 0x71, 0x52, 0xb5, 0xb8, 0x1b, 0x67, 0x3b, 0xbc, 0x2a, 0x3b, 0x06, 0x46, 0x37, 0xb5, 0x78,
+ 0x8f, 0x0d, 0x19, 0x47, 0x76, 0xd0, 0x16, 0xe4, 0x45, 0x58, 0x37, 0xf9, 0xe1, 0xa5, 0xbf, 0xcd,
+ 0x1b, 0x61, 0xb9, 0xe8, 0x4e, 0xa5, 0x36, 0x22, 0xac, 0xa8, 0x08, 0x62, 0xf1, 0x3a, 0xcc, 0xa4,
+ 0x6c, 0x8f, 0xd4, 0xa3, 0xfe, 0x90, 0x83, 0x8b, 0x83, 0xcf, 0x4b, 0x74, 0x1d, 0x66, 0x24, 0x7e,
+ 0x20, 0xee, 0x51, 0x5f, 0xf5, 0x16, 0x4b, 0xf5, 0x96, 0x0b, 0x66, 0x65, 0x33, 0xf5, 0x24, 0x13,
+ 0xa7, 0x65, 0xd1, 0x2d, 0x40, 0x7c, 0x2f, 0xa0, 0x7e, 0x87, 0x36, 0x3e, 0xd2, 0x17, 0x8d, 0xb8,
+ 0x3b, 0x45, 0x0d, 0xff, 0x4e, 0x9f, 0x04, 0x1e, 0xa0, 0x35, 0x62, 0xa6, 0xad, 0xc1, 0x9c, 0x39,
+ 0x34, 0x42, 0xa6, 0x49, 0xb2, 0xa8, 0x82, 0xee, 0xa6, 0xd9, 0xb8, 0x57, 0x1e, 0x7d, 0x04, 0xf3,
+ 0xa4, 0x43, 0x98, 0x43, 0xf6, 0x1c, 0x1a, 0x81, 0xe4, 0x14, 0xc8, 0x6b, 0x06, 0x64, 0x7e, 0xad,
+ 0x57, 0x00, 0xf7, 0xeb, 0xa0, 0x2d, 0x58, 0x68, 0x7b, 0xfd, 0x50, 0x13, 0x0a, 0xea, 0x75, 0x03,
+ 0xb5, 0x70, 0xb7, 0x5f, 0x04, 0x0f, 0xd2, 0x43, 0x0f, 0x61, 0xb2, 0x41, 0x05, 0x61, 0x4e, 0x50,
+ 0x9c, 0x54, 0x79, 0xf3, 0xee, 0x28, 0xb9, 0x7a, 0x43, 0xab, 0xea, 0xcb, 0x93, 0xf9, 0xc0, 0x21,
+ 0x20, 0x62, 0x00, 0x76, 0x38, 0x8a, 0x07, 0xc5, 0xbc, 0x2a, 0x85, 0x6f, 0x8d, 0x58, 0x0a, 0x5a,
+ 0x3b, 0x1e, 0x15, 0x23, 0x52, 0x80, 0x13, 0xe0, 0x32, 0xb1, 0x7c, 0xd9, 0xb0, 0xa2, 0x78, 0xe8,
+ 0x0e, 0x17, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xfc, 0x6b, 0x0b, 0xe6, 0xfb, 0xd6, 0x94,
+ 0x9c, 0x90, 0xad, 0x17, 0x4f, 0xc8, 0xe8, 0x01, 0x4c, 0xd8, 0xb2, 0xf6, 0xc3, 0x91, 0xe6, 0xf2,
+ 0xc8, 0x17, 0xba, 0xb8, 0x99, 0xa8, 0xcf, 0x00, 0x1b, 0xc0, 0xf2, 0x1c, 0xcc, 0xc4, 0xa2, 0x75,
+ 0xde, 0x2c, 0x7f, 0x96, 0x4d, 0x1e, 0x25, 0x75, 0xde, 0xbc, 0xd3, 0xd2, 0x21, 0xa8, 0x42, 0xc1,
+ 0xe6, 0x9e, 0x20, 0x72, 0x80, 0x34, 0x1e, 0xcf, 0x1b, 0xd0, 0xc2, 0x7a, 0xc8, 0xc0, 0xb1, 0x8c,
+ 0xec, 0x67, 0xfb, 0xdc, 0x71, 0xf8, 0x63, 0x55, 0x43, 0x89, 0x7e, 0x76, 0x53, 0x51, 0xb1, 0xe1,
+ 0xca, 0x5a, 0x69, 0xc9, 0x96, 0xc9, 0xdb, 0xe1, 0xb1, 0x1e, 0xd5, 0xca, 0xb6, 0xa1, 0xe3, 0x48,
+ 0x02, 0x5d, 0x81, 0xe9, 0x80, 0x79, 0x36, 0x0d, 0x8f, 0x9a, 0xac, 0x9e, 0x1e, 0xe4, 0x1d, 0x75,
+ 0x27, 0x41, 0xc7, 0x29, 0x29, 0x74, 0x1f, 0x0a, 0xea, 0x5b, 0xdd, 0x92, 0x72, 0x23, 0xdf, 0x92,
+ 0x66, 0xe4, 0x22, 0x77, 0x42, 0x00, 0x1c, 0x63, 0xa1, 0x55, 0x00, 0xc1, 0x5c, 0x1a, 0x08, 0xe2,
+ 0xb6, 0x02, 0xd3, 0xb8, 0xa3, 0x64, 0xda, 0x8d, 0x38, 0x38, 0x21, 0x85, 0xbe, 0x01, 0x05, 0x99,
+ 0x02, 0x75, 0xe6, 0x51, 0x5d, 0x15, 0x19, 0x6d, 0x60, 0x37, 0x24, 0xe2, 0x98, 0x8f, 0x2a, 0x00,
+ 0x8e, 0x3c, 0x40, 0x6a, 0x5d, 0x41, 0x03, 0xd5, 0x7b, 0x33, 0xb5, 0x59, 0x09, 0x5e, 0x8f, 0xa8,
+ 0x38, 0x21, 0x21, 0xa3, 0xee, 0xf1, 0xc7, 0x84, 0x09, 0x95, 0xa2, 0x89, 0xa8, 0xdf, 0xe6, 0xf7,
+ 0x09, 0x13, 0xd8, 0x70, 0xd1, 0x9b, 0x30, 0xd9, 0x31, 0x4d, 0x12, 0x14, 0xa8, 0xaa, 0xb1, 0xb0,
+ 0x35, 0x86, 0xbc, 0xf2, 0xbf, 0x53, 0xb9, 0x8b, 0xe9, 0x8f, 0xda, 0xf2, 0xa8, 0x3a, 0x79, 0x24,
+ 0x7f, 0x0b, 0x26, 0x74, 0x77, 0xed, 0xdd, 0x7c, 0xdd, 0x82, 0xb1, 0xe1, 0xa2, 0x37, 0x20, 0xb7,
+ 0xcf, 0x7d, 0x9b, 0x9a, 0x9d, 0x8f, 0xae, 0x07, 0x37, 0x25, 0x11, 0x6b, 0x1e, 0xba, 0x07, 0x73,
+ 0xf4, 0x49, 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xb7, 0x65, 0x6f, 0xdc, 0x48, 0xb3, 0x86, 0xbf,
+ 0x91, 0xf4, 0x82, 0x94, 0xff, 0x31, 0x09, 0xa8, 0x7f, 0xd8, 0x41, 0xd7, 0x52, 0x4f, 0x0a, 0x6f,
+ 0xf5, 0x3c, 0x29, 0x5c, 0xec, 0xd7, 0x48, 0xbc, 0x28, 0x74, 0x60, 0xda, 0x56, 0x2f, 0x52, 0xfa,
+ 0xfd, 0xc9, 0x4c, 0x33, 0xdf, 0x39, 0xb9, 0x60, 0x5f, 0xfc, 0x8e, 0xa5, 0x13, 0x7c, 0x3d, 0x81,
+ 0x8c, 0x53, 0x76, 0xd0, 0x4f, 0x61, 0xd6, 0xa7, 0xb6, 0x4f, 0x89, 0xa0, 0xc6, 0xb2, 0xbe, 0x6b,
+ 0xd4, 0x4e, 0xb6, 0x8c, 0x8d, 0xde, 0x50, 0xdb, 0xe8, 0xf8, 0xa8, 0x34, 0x8b, 0x53, 0xe8, 0xb8,
+ 0xc7, 0x1a, 0xfa, 0x31, 0xcc, 0xf8, 0xdc, 0x71, 0x98, 0xd7, 0x34, 0xe6, 0xb3, 0xca, 0xfc, 0xda,
+ 0x29, 0xcc, 0x6b, 0xb5, 0xa1, 0xd6, 0xe7, 0x55, 0x7f, 0x4d, 0x62, 0xe3, 0xb4, 0x29, 0xf4, 0x00,
+ 0x0a, 0x3e, 0x0d, 0x78, 0xdb, 0xb7, 0x69, 0x60, 0x8a, 0x7b, 0x65, 0xd0, 0x74, 0x82, 0x8d, 0x90,
+ 0xcc, 0x62, 0xe6, 0x53, 0x69, 0x2b, 0x88, 0x7b, 0x58, 0xc8, 0x0d, 0x70, 0x8c, 0x86, 0x0e, 0x64,
+ 0x1a, 0xef, 0x51, 0x47, 0x96, 0x76, 0xe6, 0x74, 0x1b, 0xd9, 0xbf, 0x90, 0x4a, 0x5d, 0x41, 0xe8,
+ 0x29, 0x2b, 0x51, 0x08, 0x92, 0x88, 0x0d, 0x3e, 0xfa, 0x09, 0x4c, 0x91, 0xc4, 0xdd, 0x55, 0x0f,
+ 0x76, 0x1b, 0x67, 0x32, 0xd7, 0x77, 0x5d, 0x8d, 0x9e, 0x2b, 0x93, 0xf7, 0xd4, 0xa4, 0x39, 0x74,
+ 0x07, 0x2e, 0x10, 0x5b, 0xb0, 0x0e, 0xbd, 0x41, 0x49, 0xc3, 0x61, 0x5e, 0xd4, 0x5e, 0x75, 0xc3,
+ 0x79, 0xed, 0xf8, 0xa8, 0x74, 0x61, 0x6d, 0x90, 0x00, 0x1e, 0xac, 0xb7, 0x78, 0x15, 0xa6, 0x12,
+ 0xab, 0x1e, 0x65, 0xbe, 0x5b, 0xfc, 0x10, 0xce, 0xbd, 0xd4, 0x1d, 0xf6, 0x77, 0xe3, 0x50, 0xee,
+ 0x6b, 0x00, 0xea, 0x49, 0x72, 0xfd, 0x80, 0x78, 0xcd, 0x30, 0x63, 0xab, 0x50, 0x20, 0x6d, 0xc1,
+ 0x5d, 0x22, 0x98, 0xad, 0x80, 0xf3, 0x71, 0x2e, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xa0, 0x6b, 0x30,
+ 0x1b, 0x1d, 0x6e, 0xb2, 0xd3, 0xe9, 0xd3, 0xb8, 0xa0, 0xcb, 0x63, 0x3d, 0xc5, 0xc1, 0x3d, 0x92,
+ 0xd1, 0xb5, 0x39, 0xf3, 0x72, 0xd7, 0xe6, 0x5b, 0xe1, 0xab, 0x9f, 0x5a, 0x13, 0x6d, 0xa8, 0x55,
+ 0x99, 0x97, 0xb8, 0x9e, 0x97, 0xbc, 0xa4, 0x04, 0x1e, 0xa0, 0x55, 0xfe, 0x99, 0x05, 0xaf, 0x0d,
+ 0xbd, 0x42, 0xa1, 0x1f, 0x84, 0x4f, 0x3d, 0x96, 0x4a, 0xc4, 0xab, 0x67, 0xbd, 0x8e, 0x75, 0x07,
+ 0xbf, 0xf8, 0x5c, 0xcb, 0xff, 0xea, 0xb7, 0xa5, 0xb1, 0x4f, 0xff, 0xb3, 0x3c, 0x56, 0xfe, 0xd2,
+ 0x82, 0x4b, 0x43, 0x74, 0x5f, 0xe6, 0x29, 0xfc, 0x17, 0x16, 0xcc, 0xb3, 0xde, 0x4d, 0x37, 0xed,
+ 0xf8, 0xc6, 0x19, 0x56, 0xd3, 0x97, 0x40, 0xb5, 0x0b, 0x72, 0xa6, 0xee, 0x23, 0xe3, 0x7e, 0xab,
+ 0xe5, 0x7f, 0x5a, 0x30, 0xbb, 0xf1, 0x84, 0xda, 0xb7, 0xe9, 0xe3, 0x6d, 0xde, 0xf8, 0x98, 0xf3,
+ 0xc3, 0xe4, 0xef, 0x03, 0xd6, 0xf0, 0xdf, 0x07, 0xd0, 0x55, 0xc8, 0x50, 0xaf, 0x73, 0x8a, 0x5f,
+ 0x24, 0xa6, 0x4c, 0x6c, 0x32, 0x1b, 0x5e, 0x07, 0x4b, 0x1d, 0x39, 0xb2, 0xa6, 0x92, 0x50, 0xe5,
+ 0x5e, 0x21, 0x1e, 0x59, 0x53, 0x19, 0x8b, 0xd3, 0xb2, 0x6a, 0x3a, 0xe0, 0x4e, 0x5b, 0x26, 0x79,
+ 0x36, 0x76, 0xef, 0x9e, 0x26, 0xe1, 0x90, 0x57, 0xfe, 0xfd, 0x38, 0xcc, 0xd4, 0xd9, 0x3e, 0xb5,
+ 0xbb, 0xb6, 0x43, 0xd5, 0xba, 0x1e, 0xc0, 0xcc, 0x3e, 0x61, 0x4e, 0xdb, 0xa7, 0x7a, 0x0b, 0xcd,
+ 0xd6, 0xbd, 0x1b, 0x5a, 0xbd, 0x99, 0x64, 0x3e, 0x3f, 0x2a, 0x2d, 0xa6, 0xd4, 0x53, 0x5c, 0x9c,
+ 0x46, 0x42, 0x8f, 0x00, 0x68, 0x14, 0x44, 0xb3, 0x93, 0xef, 0x9c, 0xbc, 0x93, 0xe9, 0xc0, 0xeb,
+ 0xd9, 0x29, 0xa6, 0xe1, 0x04, 0x26, 0xfa, 0xa1, 0x1c, 0xcc, 0x9a, 0x6a, 0x4b, 0x03, 0xf5, 0xb3,
+ 0xcd, 0xd4, 0x6a, 0xe5, 0x64, 0x03, 0xbb, 0x46, 0x45, 0xc1, 0x47, 0x2d, 0x24, 0xa4, 0xaa, 0x61,
+ 0xce, 0xfc, 0x59, 0xfe, 0xeb, 0x38, 0x2c, 0x9f, 0x74, 0xdc, 0xca, 0x3e, 0x23, 0x87, 0x45, 0xde,
+ 0x16, 0x61, 0x13, 0xd6, 0xb7, 0x58, 0xd5, 0x67, 0x76, 0x53, 0x1c, 0xdc, 0x23, 0x89, 0x6e, 0x41,
+ 0xa6, 0xe5, 0x53, 0x13, 0x9c, 0xea, 0xc9, 0xbe, 0xa7, 0xa2, 0x5f, 0x9b, 0x94, 0x09, 0xb4, 0xed,
+ 0x53, 0x2c, 0x41, 0x24, 0x96, 0xcb, 0x1a, 0xa6, 0x65, 0x9d, 0x0d, 0x6b, 0x8b, 0x35, 0xb0, 0x04,
+ 0x41, 0x5b, 0x90, 0x6d, 0xf1, 0x40, 0x98, 0xa9, 0x60, 0x64, 0xb0, 0xbc, 0xac, 0xfa, 0x6d, 0x1e,
+ 0x08, 0xac, 0x60, 0xca, 0x7f, 0xcb, 0x42, 0xe9, 0x84, 0xb9, 0x01, 0x6d, 0xc2, 0x82, 0xbe, 0x24,
+ 0x6f, 0x53, 0x9f, 0xf1, 0x46, 0x3a, 0x96, 0x97, 0xd4, 0x25, 0xb6, 0x9f, 0x8d, 0x07, 0xe9, 0xa0,
+ 0x0f, 0x60, 0x8e, 0x79, 0x82, 0xfa, 0x1d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x41, 0xbf, 0xce,
+ 0xa5, 0x58, 0xb8, 0x57, 0x76, 0xc0, 0x86, 0x66, 0x4e, 0xbd, 0xa1, 0x0e, 0xcc, 0xba, 0xe4, 0x49,
+ 0xe2, 0xba, 0x6d, 0x42, 0x38, 0xfc, 0xd7, 0x90, 0xb6, 0x60, 0x4e, 0x45, 0xff, 0x60, 0x5a, 0xd9,
+ 0xf4, 0xc4, 0x1d, 0x7f, 0x47, 0xf8, 0xcc, 0x6b, 0x6a, 0x6b, 0x5b, 0x29, 0x2c, 0xdc, 0x83, 0x8d,
+ 0x1e, 0x42, 0xde, 0x25, 0x4f, 0x76, 0xda, 0x7e, 0x33, 0xbc, 0x25, 0x8d, 0x6e, 0x47, 0xbd, 0xf9,
+ 0x6c, 0x19, 0x14, 0x1c, 0xe1, 0x85, 0xa9, 0x39, 0xf9, 0x2a, 0x52, 0x33, 0x4c, 0xa7, 0xfc, 0xab,
+ 0x49, 0xa7, 0xcf, 0x2c, 0x98, 0x4e, 0x56, 0x71, 0x7f, 0xef, 0xb4, 0x46, 0xe8, 0x9d, 0xdf, 0x86,
+ 0x71, 0xc1, 0x4d, 0x09, 0x9e, 0xea, 0xa4, 0x07, 0x03, 0x3b, 0xbe, 0xcb, 0xf1, 0xb8, 0xe0, 0xb5,
+ 0x9b, 0x4f, 0x9f, 0x2d, 0x8d, 0x7d, 0xfe, 0x6c, 0x69, 0xec, 0x8b, 0x67, 0x4b, 0x63, 0x9f, 0x1e,
+ 0x2f, 0x59, 0x4f, 0x8f, 0x97, 0xac, 0xcf, 0x8f, 0x97, 0xac, 0x2f, 0x8e, 0x97, 0xac, 0x2f, 0x8f,
+ 0x97, 0xac, 0x5f, 0xfe, 0x77, 0x69, 0xec, 0xe1, 0xf2, 0x49, 0xff, 0x46, 0xf0, 0xff, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x5e, 0x3a, 0xd7, 0x70, 0x69, 0x20, 0x00, 0x00,
+}
+
+func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Command) > 0 {
+ for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Command[iNdEx])
+ copy(dAtA[i:], m.Command[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Environment) > 0 {
+ for iNdEx := len(m.Environment) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ImageTrigger != nil {
+ {
+ size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.UpdatedAnnotations) > 0 {
+ keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations))
+ for k := range m.UpdatedAnnotations {
+ keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForUpdatedAnnotations[iNdEx])
+ copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.IncludeStrategy {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ i--
+ if m.IncludeReplicationMeta {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ i--
+ if m.IncludeTemplate {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ i--
+ if m.IncludeTriggers {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x10
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x48
+ if m.Template != nil {
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.Selector) > 0 {
+ keysForSelector := make([]string, 0, len(m.Selector))
+ for k := range m.Selector {
+ keysForSelector = append(keysForSelector, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Selector[string(keysForSelector[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForSelector[iNdEx])
+ copy(dAtA[i:], keysForSelector[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i--
+ if m.Paused {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ i--
+ if m.Test {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ if m.RevisionHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
+ i--
+ dAtA[i] = 0x20
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x18
+ if m.Triggers != nil {
+ {
+ size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+ i--
+ dAtA[i] = 0x48
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.Details != nil {
+ {
+ size, err := m.Details.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas))
+ i--
+ dAtA[i] = 0x30
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Causes) > 0 {
+ for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentLog) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Version != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+ i--
+ dAtA[i] = 0x50
+ }
+ i--
+ if m.NoWait {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x48
+ if m.LimitBytes != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.TailLines != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+ i--
+ dAtA[i] = 0x38
+ }
+ i--
+ if m.Timestamps {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ if m.SinceTime != nil {
+ {
+ size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.SinceSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+ i--
+ dAtA[i] = 0x20
+ }
+ i--
+ if m.Previous {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ i--
+ if m.Follow {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Container)
+ copy(dAtA[i:], m.Container)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ExcludeTriggers) > 0 {
+ for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ExcludeTriggers[iNdEx])
+ copy(dAtA[i:], m.ExcludeTriggers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i--
+ if m.Force {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ i--
+ if m.Latest {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ActiveDeadlineSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.Labels) > 0 {
+ keysForLabels := make([]string, 0, len(m.Labels))
+ for k := range m.Labels {
+ keysForLabels = append(keysForLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Labels[string(keysForLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForLabels[iNdEx])
+ copy(dAtA[i:], keysForLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if m.RollingParams != nil {
+ {
+ size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.RecreateParams != nil {
+ {
+ size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CustomParams != nil {
+ {
+ size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.LastTriggeredImage)
+ copy(dAtA[i:], m.LastTriggeredImage)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.ContainerNames) > 0 {
+ for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ContainerNames[iNdEx])
+ copy(dAtA[i:], m.ContainerNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i--
+ if m.Automatic {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ImageChangeParams != nil {
+ {
+ size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Volumes) > 0 {
+ for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Volumes[iNdEx])
+ copy(dAtA[i:], m.Volumes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.ContainerName)
+ copy(dAtA[i:], m.ContainerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Command) > 0 {
+ for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Command[iNdEx])
+ copy(dAtA[i:], m.Command[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LifecycleHook) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.TagImages) > 0 {
+ for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.ExecNewPod != nil {
+ {
+ size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.FailurePolicy)
+ copy(dAtA[i:], m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Post != nil {
+ {
+ size, err := m.Post.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Mid != nil {
+ {
+ size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Pre != nil {
+ {
+ size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Post != nil {
+ {
+ size, err := m.Post.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Pre != nil {
+ {
+ size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.MaxSurge != nil {
+ {
+ size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.MaxUnavailable != nil {
+ {
+ size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.IntervalSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.UpdatePeriodSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TagImageHook) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.ContainerName)
+ copy(dAtA[i:], m.ContainerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CustomDeploymentStrategyParams) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Environment) > 0 {
+ for _, e := range m.Environment {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ImageTrigger != nil {
+ l = m.ImageTrigger.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *DeploymentCauseImageTrigger) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentConfigList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentConfigRollback) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UpdatedAnnotations) > 0 {
+ for k, v := range m.UpdatedAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DeploymentConfigRollbackSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Revision))
+ n += 2
+ n += 2
+ n += 2
+ n += 2
+ return n
+}
+
+func (m *DeploymentConfigSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Triggers != nil {
+ l = m.Triggers.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ if m.RevisionHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
+ }
+ n += 2
+ n += 2
+ if len(m.Selector) > 0 {
+ for k, v := range m.Selector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Template != nil {
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+ return n
+}
+
+func (m *DeploymentConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.LatestVersion))
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ n += 1 + sovGenerated(uint64(m.Replicas))
+ n += 1 + sovGenerated(uint64(m.UpdatedReplicas))
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+ n += 1 + sovGenerated(uint64(m.UnavailableReplicas))
+ if m.Details != nil {
+ l = m.Details.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+ return n
+}
+
+func (m *DeploymentDetails) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Causes) > 0 {
+ for _, e := range m.Causes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentLog) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *DeploymentLogOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Container)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 2
+ if m.SinceSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+ }
+ if m.SinceTime != nil {
+ l = m.SinceTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if m.TailLines != nil {
+ n += 1 + sovGenerated(uint64(*m.TailLines))
+ }
+ if m.LimitBytes != nil {
+ n += 1 + sovGenerated(uint64(*m.LimitBytes))
+ }
+ n += 2
+ if m.Version != nil {
+ n += 1 + sovGenerated(uint64(*m.Version))
+ }
+ return n
+}
+
+func (m *DeploymentRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 2
+ if len(m.ExcludeTriggers) > 0 {
+ for _, s := range m.ExcludeTriggers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CustomParams != nil {
+ l = m.CustomParams.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RecreateParams != nil {
+ l = m.RecreateParams.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RollingParams != nil {
+ l = m.RollingParams.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Resources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.ActiveDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+ }
+ return n
+}
+
+func (m *DeploymentTriggerImageChangeParams) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ if len(m.ContainerNames) > 0 {
+ for _, s := range m.ContainerNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.LastTriggeredImage)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m DeploymentTriggerPolicies) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, e := range m {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeploymentTriggerPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ImageChangeParams != nil {
+ l = m.ImageChangeParams.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ExecNewPodHook) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ContainerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Volumes) > 0 {
+ for _, s := range m.Volumes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LifecycleHook) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ExecNewPod != nil {
+ l = m.ExecNewPod.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.TagImages) > 0 {
+ for _, e := range m.TagImages {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RecreateDeploymentStrategyParams) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ if m.Pre != nil {
+ l = m.Pre.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Mid != nil {
+ l = m.Mid.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Post != nil {
+ l = m.Post.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RollingDeploymentStrategyParams) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.UpdatePeriodSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.UpdatePeriodSeconds))
+ }
+ if m.IntervalSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.IntervalSeconds))
+ }
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ if m.MaxUnavailable != nil {
+ l = m.MaxUnavailable.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MaxSurge != nil {
+ l = m.MaxSurge.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Pre != nil {
+ l = m.Pre.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Post != nil {
+ l = m.Post.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *TagImageHook) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ContainerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.To.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CustomDeploymentStrategyParams) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnvironment := "[]EnvVar{"
+ for _, f := range this.Environment {
+ repeatedStringForEnvironment += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnvironment += "}"
+ s := strings.Join([]string{`&CustomDeploymentStrategyParams{`,
+ `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+ `Environment:` + repeatedStringForEnvironment + `,`,
+ `Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCause{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ImageTrigger:` + strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCauseImageTrigger) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCauseImageTrigger{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentConfig{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfigList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]DeploymentConfig{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&DeploymentConfigList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfigRollback) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations))
+ for k := range this.UpdatedAnnotations {
+ keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ mapStringForUpdatedAnnotations := "map[string]string{"
+ for _, k := range keysForUpdatedAnnotations {
+ mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k])
+ }
+ mapStringForUpdatedAnnotations += "}"
+ s := strings.Join([]string{`&DeploymentConfigRollback{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigRollbackSpec", "DeploymentConfigRollbackSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfigRollbackSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+ `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`,
+ `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`,
+ `IncludeReplicationMeta:` + fmt.Sprintf("%v", this.IncludeReplicationMeta) + `,`,
+ `IncludeStrategy:` + fmt.Sprintf("%v", this.IncludeStrategy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfigSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForSelector := make([]string, 0, len(this.Selector))
+ for k := range this.Selector {
+ keysForSelector = append(keysForSelector, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ mapStringForSelector := "map[string]string{"
+ for _, k := range keysForSelector {
+ mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
+ }
+ mapStringForSelector += "}"
+ s := strings.Join([]string{`&DeploymentConfigSpec{`,
+ `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`,
+ `Triggers:` + strings.Replace(fmt.Sprintf("%v", this.Triggers), "DeploymentTriggerPolicies", "DeploymentTriggerPolicies", 1) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `Test:` + fmt.Sprintf("%v", this.Test) + `,`,
+ `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`,
+ `Selector:` + mapStringForSelector + `,`,
+ `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentConfigStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]DeploymentCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DeploymentConfigStatus{`,
+ `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+ `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+ `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`,
+ `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentDetails) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForCauses := "[]DeploymentCause{"
+ for _, f := range this.Causes {
+ repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForCauses += "}"
+ s := strings.Join([]string{`&DeploymentDetails{`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Causes:` + repeatedStringForCauses + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentLog) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentLog{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentLogOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentLogOptions{`,
+ `Container:` + fmt.Sprintf("%v", this.Container) + `,`,
+ `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`,
+ `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`,
+ `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`,
+ `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`,
+ `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`,
+ `TailLines:` + valueToStringGenerated(this.TailLines) + `,`,
+ `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`,
+ `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`,
+ `Version:` + valueToStringGenerated(this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentRequest{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`,
+ `Force:` + fmt.Sprintf("%v", this.Force) + `,`,
+ `ExcludeTriggers:` + fmt.Sprintf("%v", this.ExcludeTriggers) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForLabels := make([]string, 0, len(this.Labels))
+ for k := range this.Labels {
+ keysForLabels = append(keysForLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ mapStringForLabels := "map[string]string{"
+ for _, k := range keysForLabels {
+ mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+ }
+ mapStringForLabels += "}"
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ s := strings.Join([]string{`&DeploymentStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`,
+ `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`,
+ `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`,
+ `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`,
+ `Labels:` + mapStringForLabels + `,`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentTriggerImageChangeParams) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`,
+ `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`,
+ `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeploymentTriggerPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DeploymentTriggerPolicy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExecNewPodHook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ s := strings.Join([]string{`&ExecNewPodHook{`,
+ `Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`,
+ `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LifecycleHook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTagImages := "[]TagImageHook{"
+ for _, f := range this.TagImages {
+ repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTagImages += "}"
+ s := strings.Join([]string{`&LifecycleHook{`,
+ `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`,
+ `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`,
+ `TagImages:` + repeatedStringForTagImages + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RecreateDeploymentStrategyParams) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`,
+ `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`,
+ `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RollingDeploymentStrategyParams) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RollingDeploymentStrategyParams{`,
+ `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`,
+ `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`,
+ `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagImageHook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TagImageHook{`,
+ `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`,
+ `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomDeploymentStrategyParams: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Environment = append(m.Environment, v1.EnvVar{})
+ if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageTrigger == nil {
+ m.ImageTrigger = &DeploymentCauseImageTrigger{}
+ }
+ if err := m.ImageTrigger.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfigList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfigList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, DeploymentConfig{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfigRollback: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfigRollback: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpdatedAnnotations == nil {
+ m.UpdatedAnnotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.UpdatedAnnotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfigRollbackSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfigRollbackSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeTriggers", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeTriggers = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeTemplate", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeTemplate = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeReplicationMeta", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeReplicationMeta = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeStrategy", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeStrategy = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfigSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Triggers == nil {
+ m.Triggers = DeploymentTriggerPolicies{}
+ }
+ if err := m.Triggers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RevisionHistoryLimit = &v
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Test = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Selector[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Template == nil {
+ m.Template = &v1.PodTemplateSpec{}
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentConfigStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LatestVersion", wireType)
+ }
+ m.LatestVersion = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LatestVersion |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+ }
+ m.Replicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Replicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType)
+ }
+ m.UpdatedReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UpdatedReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType)
+ }
+ m.UnavailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnavailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Details == nil {
+ m.Details = &DeploymentDetails{}
+ }
+ if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, DeploymentCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+ }
+ m.ReadyReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ReadyReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentDetails) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentDetails: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Causes = append(m.Causes, DeploymentCause{})
+ if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentLog) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentLog: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentLog: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentLogOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Container = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Follow = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Previous = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SinceSeconds = &v
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SinceTime == nil {
+ m.SinceTime = &v11.Time{}
+ }
+ if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Timestamps = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TailLines = &v
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LimitBytes = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NoWait = bool(v != 0)
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Version = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Latest", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Latest = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Force = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTriggers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExcludeTriggers = append(m.ExcludeTriggers, DeploymentTriggerType(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CustomParams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CustomParams == nil {
+ m.CustomParams = &CustomDeploymentStrategyParams{}
+ }
+ if err := m.CustomParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecreateParams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RecreateParams == nil {
+ m.RecreateParams = &RecreateDeploymentStrategyParams{}
+ }
+ if err := m.RecreateParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RollingParams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RollingParams == nil {
+ m.RollingParams = &RollingDeploymentStrategyParams{}
+ }
+ if err := m.RollingParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveDeadlineSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentTriggerImageChangeParams: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Automatic", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Automatic = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerNames = append(m.ContainerNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImage", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LastTriggeredImage = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentTriggerPolicies: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentTriggerPolicies: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ *m = append(*m, DeploymentTriggerPolicy{})
+ if err := (*m)[len(*m)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeploymentTriggerPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeploymentTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeParams", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ImageChangeParams == nil {
+ m.ImageChangeParams = &DeploymentTriggerImageChangeParams{}
+ }
+ if err := m.ImageChangeParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExecNewPodHook: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExecNewPodHook: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v1.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Volumes = append(m.Volumes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LifecycleHook) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LifecycleHook: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LifecycleHook: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FailurePolicy = LifecycleHookFailurePolicy(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExecNewPod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExecNewPod == nil {
+ m.ExecNewPod = &ExecNewPodHook{}
+ }
+ if err := m.ExecNewPod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TagImages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TagImages = append(m.TagImages, TagImageHook{})
+ if err := m.TagImages[len(m.TagImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RecreateDeploymentStrategyParams: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RecreateDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pre == nil {
+ m.Pre = &LifecycleHook{}
+ }
+ if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Mid == nil {
+ m.Mid = &LifecycleHook{}
+ }
+ if err := m.Mid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Post == nil {
+ m.Post = &LifecycleHook{}
+ }
+ if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RollingDeploymentStrategyParams: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RollingDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpdatePeriodSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.UpdatePeriodSeconds = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IntervalSeconds = &v
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxUnavailable == nil {
+ m.MaxUnavailable = &intstr.IntOrString{}
+ }
+ if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MaxSurge == nil {
+ m.MaxSurge = &intstr.IntOrString{}
+ }
+ if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pre == nil {
+ m.Pre = &LifecycleHook{}
+ }
+ if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Post == nil {
+ m.Post = &LifecycleHook{}
+ }
+ if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagImageHook) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagImageHook: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagImageHook: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContainerName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto
new file mode 100644
index 0000000000..ddf28f6004
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/generated.proto
@@ -0,0 +1,490 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.apps.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/apps/v1";
+
+// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
+message CustomDeploymentStrategyParams {
+ // Image specifies a container image which can carry out a deployment.
+ optional string image = 1;
+
+ // Environment holds the environment which will be given to the container for Image.
+ repeated k8s.io.api.core.v1.EnvVar environment = 2;
+
+ // Command is optional and overrides CMD in the container Image.
+ repeated string command = 3;
+}
+
+// DeploymentCause captures information about a particular cause of a deployment.
+message DeploymentCause {
+ // Type of the trigger that resulted in the creation of a new deployment
+ optional string type = 1;
+
+ // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+ optional DeploymentCauseImageTrigger imageTrigger = 2;
+}
+
+// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
+// from an image change trigger
+message DeploymentCauseImageTrigger {
+ // From is a reference to the changed object which triggered a deployment. The field may have
+ // the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+}
+
+// DeploymentCondition describes the state of a deployment config at a certain point.
+message DeploymentCondition {
+ // Type of deployment condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+ // The last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 5;
+}
+
+// Deployment Configs define the template for a pod and manages deploying new images or configuration changes.
+// A single deployment configuration is usually analogous to a single micro-service. Can support many different
+// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as
+// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.
+//
+// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed.
+// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment
+// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
+// is triggered by any means.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// Deprecated: Use deployments or other means for declarative updates for pods instead.
+// +openshift:compatibility-gen:level=1
+message DeploymentConfig {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec represents a desired deployment state and how to deploy to it.
+ optional DeploymentConfigSpec spec = 2;
+
+ // Status represents the current deployment state.
+ // +optional
+ optional DeploymentConfigStatus status = 3;
+}
+
+// DeploymentConfigList is a collection of deployment configs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentConfigList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of deployment configs
+ repeated DeploymentConfig items = 2;
+}
+
+// DeploymentConfigRollback provides the input to rollback generation.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentConfigRollback {
+ // Name of the deployment config that will be rolled back.
+ optional string name = 1;
+
+ // UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
+ map updatedAnnotations = 2;
+
+ // Spec defines the options to rollback generation.
+ optional DeploymentConfigRollbackSpec spec = 3;
+}
+
+// DeploymentConfigRollbackSpec represents the options for rollback generation.
+message DeploymentConfigRollbackSpec {
+ // From points to a ReplicationController which is a deployment.
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // Revision to rollback to. If set to 0, rollback to the last revision.
+ optional int64 revision = 2;
+
+ // IncludeTriggers specifies whether to include config Triggers.
+ optional bool includeTriggers = 3;
+
+ // IncludeTemplate specifies whether to include the PodTemplateSpec.
+ optional bool includeTemplate = 4;
+
+ // IncludeReplicationMeta specifies whether to include the replica count and selector.
+ optional bool includeReplicationMeta = 5;
+
+ // IncludeStrategy specifies whether to include the deployment Strategy.
+ optional bool includeStrategy = 6;
+}
+
+// DeploymentConfigSpec represents the desired state of the deployment.
+message DeploymentConfigSpec {
+ // Strategy describes how a deployment is executed.
+ // +optional
+ optional DeploymentStrategy strategy = 1;
+
+ // MinReadySeconds is the minimum number of seconds for which a newly created pod should
+ // be ready without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ optional int32 minReadySeconds = 9;
+
+ // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
+ // are defined, a new deployment can only occur as a result of an explicit client update to the
+ // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
+ // +optional
+ optional DeploymentTriggerPolicies triggers = 2;
+
+ // Replicas is the number of desired replicas.
+ // +optional
+ optional int32 replicas = 3;
+
+ // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
+ // This field is a pointer to allow for differentiation between an explicit zero and not specified.
+ // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
+ optional int32 revisionHistoryLimit = 4;
+
+ // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
+ // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
+ // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
+ // +optional
+ optional bool test = 5;
+
+ // Paused indicates that the deployment config is paused resulting in no new deployments on template
+ // changes or changes in the template caused by other triggers.
+ optional bool paused = 6;
+
+ // Selector is a label query over pods that should match the Replicas count.
+ map selector = 7;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 8;
+}
+
+// DeploymentConfigStatus represents the current deployment state.
+message DeploymentConfigStatus {
+ // LatestVersion is used to determine whether the current deployment associated with a deployment
+ // config is out of sync.
+ optional int64 latestVersion = 1;
+
+ // ObservedGeneration is the most recent generation observed by the deployment config controller.
+ optional int64 observedGeneration = 2;
+
+ // Replicas is the total number of pods targeted by this deployment config.
+ optional int32 replicas = 3;
+
+ // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
+ // that have the desired template spec.
+ optional int32 updatedReplicas = 4;
+
+ // AvailableReplicas is the total number of available pods targeted by this deployment config.
+ optional int32 availableReplicas = 5;
+
+ // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+ optional int32 unavailableReplicas = 6;
+
+ // Details are the reasons for the update to this deployment config.
+ // This could be based on a change made by the user or caused by an automatic trigger
+ optional DeploymentDetails details = 7;
+
+ // Conditions represents the latest available observations of a deployment config's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated DeploymentCondition conditions = 8;
+
+ // Total number of ready pods targeted by this deployment.
+ optional int32 readyReplicas = 9;
+}
+
+// DeploymentDetails captures information about the causes of a deployment.
+message DeploymentDetails {
+ // Message is the user specified change message, if this deployment was triggered manually by the user
+ optional string message = 1;
+
+ // Causes are extended data associated with all the causes for creating a new deployment
+ repeated DeploymentCause causes = 2;
+}
+
+// DeploymentLog represents the logs for a deployment
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentLog {
+}
+
+// DeploymentLogOptions is the REST options for a deployment log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentLogOptions {
+ // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ optional string container = 1;
+
+ // Follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ optional bool follow = 2;
+
+ // Return previous deployment logs. Defaults to false.
+ optional bool previous = 3;
+
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional int64 sinceSeconds = 4;
+
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
+
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ optional bool timestamps = 6;
+
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ optional int64 tailLines = 7;
+
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ optional int64 limitBytes = 8;
+
+ // NoWait if true causes the call to return immediately even if the deployment
+ // is not available yet. Otherwise the server will wait until the deployment has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ optional bool nowait = 9;
+
+ // Version of the deployment for which to view logs.
+ optional int64 version = 10;
+}
+
+// DeploymentRequest is a request to a deployment config for a new deployment.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message DeploymentRequest {
+ // Name of the deployment config for requesting a new deployment.
+ optional string name = 1;
+
+ // Latest will update the deployment config with the latest state from all triggers.
+ optional bool latest = 2;
+
+ // Force will try to force a new deployment to run. If the deployment config is paused,
+ // then setting this to true will return an Invalid error.
+ optional bool force = 3;
+
+ // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
+ // This field overrides the triggers from latest and allows clients to control specific
+ // logic. This field is ignored if not specified.
+ repeated string excludeTriggers = 4;
+}
+
+// DeploymentStrategy describes how to perform a deployment.
+message DeploymentStrategy {
+ // Type is the name of a deployment strategy.
+ // +optional
+ optional string type = 1;
+
+ // CustomParams are the input to the Custom deployment strategy, and may also
+ // be specified for the Recreate and Rolling strategies to customize the execution
+ // process that runs the deployment.
+ optional CustomDeploymentStrategyParams customParams = 2;
+
+ // RecreateParams are the input to the Recreate deployment strategy.
+ optional RecreateDeploymentStrategyParams recreateParams = 3;
+
+ // RollingParams are the input to the Rolling deployment strategy.
+ optional RollingDeploymentStrategyParams rollingParams = 4;
+
+ // Resources contains resource requirements to execute the deployment and any hooks.
+ optional k8s.io.api.core.v1.ResourceRequirements resources = 5;
+
+ // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ map labels = 6;
+
+ // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ map annotations = 7;
+
+ // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
+ // config may be active on a node before the system actively tries to terminate them.
+ optional int64 activeDeadlineSeconds = 8;
+}
+
+// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
+message DeploymentTriggerImageChangeParams {
+ // Automatic means that the detection of a new tag value should result in an image update
+ // inside the pod template.
+ optional bool automatic = 1;
+
+ // ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
+ // If multiple triggers point to the same containers, the resulting behavior is undefined. Future
+ // API versions will make this a validation error. If ContainerNames does not point to a valid container,
+ // the trigger will be ignored. Future API versions will make this a validation error.
+ repeated string containerNames = 2;
+
+ // From is a reference to an image stream tag to watch for changes. From.Name is the only
+ // required subfield - if From.Namespace is blank, the namespace of the current deployment
+ // trigger will be used.
+ optional k8s.io.api.core.v1.ObjectReference from = 3;
+
+ // LastTriggeredImage is the last image to be triggered.
+ optional string lastTriggeredImage = 4;
+}
+
+// DeploymentTriggerPolicies is a list of policies where nil values and different from empty arrays.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message DeploymentTriggerPolicies {
+ // items, if empty, will result in an empty slice
+
+ repeated DeploymentTriggerPolicy items = 1;
+}
+
+// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
+message DeploymentTriggerPolicy {
+ // Type of the trigger
+ optional string type = 1;
+
+ // ImageChangeParams represents the parameters for the ImageChange trigger.
+ optional DeploymentTriggerImageChangeParams imageChangeParams = 2;
+}
+
+// ExecNewPodHook is a hook implementation which runs a command in a new pod
+// based on the specified container which is assumed to be part of the
+// deployment template.
+message ExecNewPodHook {
+ // Command is the action command and its arguments.
+ repeated string command = 1;
+
+ // Env is a set of environment variables to supply to the hook pod's container.
+ repeated k8s.io.api.core.v1.EnvVar env = 2;
+
+ // ContainerName is the name of a container in the deployment pod template
+ // whose container image will be used for the hook pod's container.
+ optional string containerName = 3;
+
+ // Volumes is a list of named volumes from the pod template which should be
+ // copied to the hook pod. Volumes names not found in pod spec are ignored.
+ // An empty list means no volumes will be copied.
+ repeated string volumes = 4;
+}
+
+// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
+message LifecycleHook {
+ // FailurePolicy specifies what action to take if the hook fails.
+ optional string failurePolicy = 1;
+
+ // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
+ optional ExecNewPodHook execNewPod = 2;
+
+ // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+ repeated TagImageHook tagImages = 3;
+}
+
+// RecreateDeploymentStrategyParams are the input to the Recreate deployment
+// strategy.
+message RecreateDeploymentStrategyParams {
+ // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // value is nil, a default will be used.
+ optional int64 timeoutSeconds = 1;
+
+ // Pre is a lifecycle hook which is executed before the strategy manipulates
+ // the deployment. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook pre = 2;
+
+ // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
+ // pod is created. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook mid = 3;
+
+ // Post is a lifecycle hook which is executed after the strategy has
+ // finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook post = 4;
+}
+
+// RollingDeploymentStrategyParams are the input to the Rolling deployment
+// strategy.
+message RollingDeploymentStrategyParams {
+ // UpdatePeriodSeconds is the time to wait between individual pod updates.
+ // If the value is nil, a default will be used.
+ optional int64 updatePeriodSeconds = 1;
+
+ // IntervalSeconds is the time to wait between polling deployment status
+ // after update. If the value is nil, a default will be used.
+ optional int64 intervalSeconds = 2;
+
+ // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // value is nil, a default will be used.
+ optional int64 timeoutSeconds = 3;
+
+ // MaxUnavailable is the maximum number of pods that can be unavailable
+ // during the update. Value can be an absolute number (ex: 5) or a
+ // percentage of total pods at the start of update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding down.
+ //
+ // This cannot be 0 if MaxSurge is 0. By default, 25% is used.
+ //
+ // Example: when this is set to 30%, the old RC can be scaled down by 30%
+ // immediately when the rolling update starts. Once new pods are ready, old
+ // RC can be scaled down further, followed by scaling up the new RC,
+ // ensuring that at least 70% of original number of pods are available at
+ // all times during the update.
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4;
+
+ // MaxSurge is the maximum number of pods that can be scheduled above the
+ // original number of pods. Value can be an absolute number (ex: 5) or a
+ // percentage of total pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ //
+ // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
+ //
+ // Example: when this is set to 30%, the new RC can be scaled up by 30%
+ // immediately when the rolling update starts. Once old pods have been
+ // killed, new RC can be scaled up further, ensuring that total number of
+ // pods running at any time during the update is atmost 130% of original
+ // pods.
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5;
+
+ // Pre is a lifecycle hook which is executed before the deployment process
+ // begins. All LifecycleHookFailurePolicy values are supported.
+ optional LifecycleHook pre = 7;
+
+ // Post is a lifecycle hook which is executed after the strategy has
+ // finished all deployment logic. All LifecycleHookFailurePolicy values
+ // are supported.
+ optional LifecycleHook post = 8;
+}
+
+// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
+message TagImageHook {
+ // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+ // container this value will be defaulted to the name of that container.
+ optional string containerName = 1;
+
+ // To is the target ImageStreamTag to set the container's image onto.
+ optional k8s.io.api.core.v1.ObjectReference to = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go
new file mode 100644
index 0000000000..c8fa0ed999
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/legacy.go
@@ -0,0 +1,28 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &DeploymentConfig{},
+ &DeploymentConfigList{},
+ &DeploymentConfigRollback{},
+ &DeploymentRequest{},
+ &DeploymentLog{},
+ &DeploymentLogOptions{},
+ &extensionsv1beta1.Scale{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go
new file mode 100644
index 0000000000..0c1e47e6d4
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/register.go
@@ -0,0 +1,45 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "apps.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &DeploymentConfig{},
+ &DeploymentConfigList{},
+ &DeploymentConfigRollback{},
+ &DeploymentRequest{},
+ &DeploymentLog{},
+ &DeploymentLogOptions{},
+ &extensionsv1beta1.Scale{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go
new file mode 100644
index 0000000000..1465aea278
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/types.go
@@ -0,0 +1,537 @@
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest
+// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// Deployment Configs define the template for a pod and manages deploying new images or configuration changes.
+// A single deployment configuration is usually analogous to a single micro-service. Can support many different
+// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as
+// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.
+//
+// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed.
+// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment
+// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment
+// is triggered by any means.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// Deprecated: Use deployments or other means for declarative updates for pods instead.
+// +openshift:compatibility-gen:level=1
+type DeploymentConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec represents a desired deployment state and how to deploy to it.
+ Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status represents the current deployment state.
+ // +optional
+ Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// DeploymentConfigSpec represents the desired state of the deployment.
+type DeploymentConfigSpec struct {
+ // Strategy describes how a deployment is executed.
+ // +optional
+ Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"`
+
+ // MinReadySeconds is the minimum number of seconds for which a newly created pod should
+ // be ready without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
+
+ // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
+ // are defined, a new deployment can only occur as a result of an explicit client update to the
+ // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.
+ // +optional
+ Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"`
+
+ // Replicas is the number of desired replicas.
+ // +optional
+ Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
+
+ // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
+ // This field is a pointer to allow for differentiation between an explicit zero and not specified.
+ // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"`
+
+ // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
+ // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
+ // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
+ // +optional
+ Test bool `json:"test" protobuf:"varint,5,opt,name=test"`
+
+ // Paused indicates that the deployment config is paused resulting in no new deployments on template
+ // changes or changes in the template caused by other triggers.
+ Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"`
+
+ // Selector is a label query over pods that should match the Replicas count.
+ Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"`
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"`
+}
+
+// DeploymentStrategy describes how to perform a deployment.
+type DeploymentStrategy struct {
+ // Type is the name of a deployment strategy.
+ // +optional
+ Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
+
+ // CustomParams are the input to the Custom deployment strategy, and may also
+ // be specified for the Recreate and Rolling strategies to customize the execution
+ // process that runs the deployment.
+ CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"`
+ // RecreateParams are the input to the Recreate deployment strategy.
+ RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"`
+ // RollingParams are the input to the Rolling deployment strategy.
+ RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"`
+
+ // Resources contains resource requirements to execute the deployment and any hooks.
+ Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"`
+ // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"`
+ // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
+ Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"`
+
+ // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment
+ // config may be active on a node before the system actively tries to terminate them.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"`
+}
+
+// DeploymentStrategyType refers to a specific DeploymentStrategy implementation.
+type DeploymentStrategyType string
+
+const (
+ // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default.
+ DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate"
+ // DeploymentStrategyTypeCustom is a user defined strategy.
+ DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom"
+ // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater.
+ DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling"
+)
+
+// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
+type CustomDeploymentStrategyParams struct {
+ // Image specifies a container image which can carry out a deployment.
+ Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
+ // Environment holds the environment which will be given to the container for Image.
+ Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"`
+ // Command is optional and overrides CMD in the container Image.
+ Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+}
+
+// RecreateDeploymentStrategyParams are the input to the Recreate deployment
+// strategy.
+type RecreateDeploymentStrategyParams struct {
+ // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // value is nil, a default will be used.
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
+ // Pre is a lifecycle hook which is executed before the strategy manipulates
+ // the deployment. All LifecycleHookFailurePolicy values are supported.
+ Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"`
+ // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
+ // pod is created. All LifecycleHookFailurePolicy values are supported.
+ Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"`
+ // Post is a lifecycle hook which is executed after the strategy has
+ // finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
+ Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"`
+}
+
+// RollingDeploymentStrategyParams are the input to the Rolling deployment
+// strategy.
+type RollingDeploymentStrategyParams struct {
+ // UpdatePeriodSeconds is the time to wait between individual pod updates.
+ // If the value is nil, a default will be used.
+ UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"`
+ // IntervalSeconds is the time to wait between polling deployment status
+ // after update. If the value is nil, a default will be used.
+ IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"`
+ // TimeoutSeconds is the time to wait for updates before giving up. If the
+ // value is nil, a default will be used.
+ TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
+ // MaxUnavailable is the maximum number of pods that can be unavailable
+ // during the update. Value can be an absolute number (ex: 5) or a
+ // percentage of total pods at the start of update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding down.
+ //
+ // This cannot be 0 if MaxSurge is 0. By default, 25% is used.
+ //
+ // Example: when this is set to 30%, the old RC can be scaled down by 30%
+ // immediately when the rolling update starts. Once new pods are ready, old
+ // RC can be scaled down further, followed by scaling up the new RC,
+ // ensuring that at least 70% of original number of pods are available at
+ // all times during the update.
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"`
+ // MaxSurge is the maximum number of pods that can be scheduled above the
+ // original number of pods. Value can be an absolute number (ex: 5) or a
+ // percentage of total pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ //
+ // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
+ //
+ // Example: when this is set to 30%, the new RC can be scaled up by 30%
+ // immediately when the rolling update starts. Once old pods have been
+ // killed, new RC can be scaled up further, ensuring that total number of
+ // pods running at any time during the update is atmost 130% of original
+ // pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"`
+ // Pre is a lifecycle hook which is executed before the deployment process
+ // begins. All LifecycleHookFailurePolicy values are supported.
+ Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"`
+ // Post is a lifecycle hook which is executed after the strategy has
+ // finished all deployment logic. All LifecycleHookFailurePolicy values
+ // are supported.
+ Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"`
+}
+
+// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
+type LifecycleHook struct {
+ // FailurePolicy specifies what action to take if the hook fails.
+ FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"`
+
+ // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
+ ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"`
+
+ // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
+ TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"`
+}
+
+// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails.
+type LifecycleHookFailurePolicy string
+
+const (
+ // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds.
+ LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry"
+ // LifecycleHookFailurePolicyAbort means abort the deployment.
+ LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort"
+ // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
+ LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore"
+)
+
+// ExecNewPodHook is a hook implementation which runs a command in a new pod
+// based on the specified container which is assumed to be part of the
+// deployment template.
+type ExecNewPodHook struct {
+ // Command is the action command and its arguments.
+ Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
+ // Env is a set of environment variables to supply to the hook pod's container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
+ // ContainerName is the name of a container in the deployment pod template
+ // whose container image will be used for the hook pod's container.
+ ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
+ // Volumes is a list of named volumes from the pod template which should be
+ // copied to the hook pod. Volumes names not found in pod spec are ignored.
+ // An empty list means no volumes will be copied.
+ Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
+}
+
+// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
+type TagImageHook struct {
+ // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+ // container this value will be defaulted to the name of that container.
+ ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
+ // To is the target ImageStreamTag to set the container's image onto.
+ To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
+}
+
+// DeploymentTriggerPolicies is a list of policies where nil values and different from empty arrays.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type DeploymentTriggerPolicies []DeploymentTriggerPolicy
+
+func (t DeploymentTriggerPolicies) String() string {
+ return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t))
+}
+
+// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
+type DeploymentTriggerPolicy struct {
+ // Type of the trigger
+ Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+ // ImageChangeParams represents the parameters for the ImageChange trigger.
+ ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
+}
+
+// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation.
+type DeploymentTriggerType string
+
+const (
+ // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
+ // a container image repository.
+ DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
+ // DeploymentTriggerOnConfigChange will create new deployments in response to changes to
+ // the ControllerTemplate of a DeploymentConfig.
+ DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange"
+)
+
+// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
+type DeploymentTriggerImageChangeParams struct {
+ // Automatic means that the detection of a new tag value should result in an image update
+ // inside the pod template.
+ Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"`
+ // ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
+ // If multiple triggers point to the same containers, the resulting behavior is undefined. Future
+ // API versions will make this a validation error. If ContainerNames does not point to a valid container,
+ // the trigger will be ignored. Future API versions will make this a validation error.
+ ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"`
+ // From is a reference to an image stream tag to watch for changes. From.Name is the only
+ // required subfield - if From.Namespace is blank, the namespace of the current deployment
+ // trigger will be used.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"`
+ // LastTriggeredImage is the last image to be triggered.
+ LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"`
+}
+
+// DeploymentConfigStatus represents the current deployment state.
+type DeploymentConfigStatus struct {
+ // LatestVersion is used to determine whether the current deployment associated with a deployment
+ // config is out of sync.
+ LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"`
+ // ObservedGeneration is the most recent generation observed by the deployment config controller.
+ ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"`
+ // Replicas is the total number of pods targeted by this deployment config.
+ Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
+ // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
+ // that have the desired template spec.
+ UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"`
+ // AvailableReplicas is the total number of available pods targeted by this deployment config.
+ AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"`
+ // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
+ UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"`
+ // Details are the reasons for the update to this deployment config.
+ // This could be based on a change made by the user or caused by an automatic trigger
+ Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"`
+ // Conditions represents the latest available observations of a deployment config's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"`
+ // Total number of ready pods targeted by this deployment.
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"`
+}
+
+// DeploymentDetails captures information about the causes of a deployment.
+type DeploymentDetails struct {
+ // Message is the user specified change message, if this deployment was triggered manually by the user
+ Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+ // Causes are extended data associated with all the causes for creating a new deployment
+ Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
+}
+
+// DeploymentCause captures information about a particular cause of a deployment.
+type DeploymentCause struct {
+ // Type of the trigger that resulted in the creation of a new deployment
+ Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+ // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+ ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
+}
+
+// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
+// from an image change trigger
+type DeploymentCauseImageTrigger struct {
+ // From is a reference to the changed object which triggered a deployment. The field may have
+ // the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a DeploymentConfig.
+const (
+ // DeploymentAvailable means the DeploymentConfig is available, ie. at least the minimum available
+ // replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type,
+ // dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and
+ // running for at least dc.spec.minReadySeconds.
+ DeploymentAvailable DeploymentConditionType = "Available"
+ // DeploymentProgressing is:
+ // * True: the DeploymentConfig has been successfully deployed or is amidst getting deployed.
+ // The two different states can be determined by looking at the Reason of the Condition.
+ // For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable}
+ // and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}.
+ // TODO: Represent a successfully deployed DC by using something else for Status like Unknown?
+ // * False: the DeploymentConfig has failed to deploy its latest version.
+ //
+ // This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds
+ // field, which is responsible for the time in seconds to wait for a rollout before deciding that
+ // no progress can be made, thus the rollout is aborted.
+ //
+ // Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // DeploymentReplicaFailure is added in a deployment config when one of its pods
+ // fails to be created or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment config at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentConfigList is a collection of deployment configs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of deployment configs
+ Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentConfigRollback provides the input to rollback generation.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentConfigRollback struct {
+ metav1.TypeMeta `json:",inline"`
+ // Name of the deployment config that will be rolled back.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
+ UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
+ // Spec defines the options to rollback generation.
+ Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"`
+}
+
+// DeploymentConfigRollbackSpec represents the options for rollback generation.
+type DeploymentConfigRollbackSpec struct {
+ // From points to a ReplicationController which is a deployment.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+ // Revision to rollback to. If set to 0, rollback to the last revision.
+ Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"`
+ // IncludeTriggers specifies whether to include config Triggers.
+ IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"`
+ // IncludeTemplate specifies whether to include the PodTemplateSpec.
+ IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"`
+ // IncludeReplicationMeta specifies whether to include the replica count and selector.
+ IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"`
+ // IncludeStrategy specifies whether to include the deployment Strategy.
+ IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentRequest is a request to a deployment config for a new deployment.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentRequest struct {
+ metav1.TypeMeta `json:",inline"`
+ // Name of the deployment config for requesting a new deployment.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Latest will update the deployment config with the latest state from all triggers.
+ Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"`
+ // Force will try to force a new deployment to run. If the deployment config is paused,
+ // then setting this to true will return an Invalid error.
+ Force bool `json:"force" protobuf:"varint,3,opt,name=force"`
+ // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
+ // This field overrides the triggers from latest and allows clients to control specific
+ // logic. This field is ignored if not specified.
+ ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentLog represents the logs for a deployment
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentLog struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentLogOptions is the REST options for a deployment log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // Follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // Return previous deployment logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+ // An RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+ // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+ // If set, the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+ // If set, the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+ // NoWait if true causes the call to return immediately even if the deployment
+ // is not available yet. Otherwise the server will wait until the deployment has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+ // Version of the deployment for which to view logs.
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..11c22a80f3
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go
@@ -0,0 +1,682 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) {
+ *out = *in
+ if in.Environment != nil {
+ in, out := &in.Environment, &out.Environment
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Command != nil {
+ in, out := &in.Command, &out.Command
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams.
+func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomDeploymentStrategyParams)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) {
+ *out = *in
+ if in.ImageTrigger != nil {
+ in, out := &in.ImageTrigger, &out.ImageTrigger
+ *out = new(DeploymentCauseImageTrigger)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause.
+func (in *DeploymentCause) DeepCopy() *DeploymentCause {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) {
+ *out = *in
+ out.From = in.From
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger.
+func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCauseImageTrigger)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig.
+func (in *DeploymentConfig) DeepCopy() *DeploymentConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DeploymentConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList.
+func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.UpdatedAnnotations != nil {
+ in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.Spec = in.Spec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback.
+func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfigRollback)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) {
+ *out = *in
+ out.From = in.From
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec.
+func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfigRollbackSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) {
+ *out = *in
+ in.Strategy.DeepCopyInto(&out.Strategy)
+ if in.Triggers != nil {
+ in, out := &in.Triggers, &out.Triggers
+ *out = make(DeploymentTriggerPolicies, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RevisionHistoryLimit != nil {
+ in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Template != nil {
+ in, out := &in.Template, &out.Template
+ *out = new(corev1.PodTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec.
+func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) {
+ *out = *in
+ if in.Details != nil {
+ in, out := &in.Details, &out.Details
+ *out = new(DeploymentDetails)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]DeploymentCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus.
+func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) {
+ *out = *in
+ if in.Causes != nil {
+ in, out := &in.Causes, &out.Causes
+ *out = make([]DeploymentCause, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails.
+func (in *DeploymentDetails) DeepCopy() *DeploymentDetails {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentDetails)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog.
+func (in *DeploymentLog) DeepCopy() *DeploymentLog {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentLog)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentLog) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.SinceSeconds != nil {
+ in, out := &in.SinceSeconds, &out.SinceSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.SinceTime != nil {
+ in, out := &in.SinceTime, &out.SinceTime
+ *out = (*in).DeepCopy()
+ }
+ if in.TailLines != nil {
+ in, out := &in.TailLines, &out.TailLines
+ *out = new(int64)
+ **out = **in
+ }
+ if in.LimitBytes != nil {
+ in, out := &in.LimitBytes, &out.LimitBytes
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions.
+func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentLogOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ExcludeTriggers != nil {
+ in, out := &in.ExcludeTriggers, &out.ExcludeTriggers
+ *out = make([]DeploymentTriggerType, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest.
+func (in *DeploymentRequest) DeepCopy() *DeploymentRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeploymentRequest) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
+ *out = *in
+ if in.CustomParams != nil {
+ in, out := &in.CustomParams, &out.CustomParams
+ *out = new(CustomDeploymentStrategyParams)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RecreateParams != nil {
+ in, out := &in.RecreateParams, &out.RecreateParams
+ *out = new(RecreateDeploymentStrategyParams)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RollingParams != nil {
+ in, out := &in.RollingParams, &out.RollingParams
+ *out = new(RollingDeploymentStrategyParams)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ActiveDeadlineSeconds != nil {
+ in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
+func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) {
+ *out = *in
+ if in.ContainerNames != nil {
+ in, out := &in.ContainerNames, &out.ContainerNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.From = in.From
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams.
+func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentTriggerImageChangeParams)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) {
+ {
+ in := &in
+ *out = make(DeploymentTriggerPolicies, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies.
+func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentTriggerPolicies)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) {
+ *out = *in
+ if in.ImageChangeParams != nil {
+ in, out := &in.ImageChangeParams, &out.ImageChangeParams
+ *out = new(DeploymentTriggerImageChangeParams)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy.
+func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(DeploymentTriggerPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) {
+ *out = *in
+ if in.Command != nil {
+ in, out := &in.Command, &out.Command
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook.
+func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecNewPodHook)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) {
+ *out = *in
+ if in.ExecNewPod != nil {
+ in, out := &in.ExecNewPod, &out.ExecNewPod
+ *out = new(ExecNewPodHook)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TagImages != nil {
+ in, out := &in.TagImages, &out.TagImages
+ *out = make([]TagImageHook, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook.
+func (in *LifecycleHook) DeepCopy() *LifecycleHook {
+ if in == nil {
+ return nil
+ }
+ out := new(LifecycleHook)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) {
+ *out = *in
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Pre != nil {
+ in, out := &in.Pre, &out.Pre
+ *out = new(LifecycleHook)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Mid != nil {
+ in, out := &in.Mid, &out.Mid
+ *out = new(LifecycleHook)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Post != nil {
+ in, out := &in.Post, &out.Post
+ *out = new(LifecycleHook)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams.
+func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams {
+ if in == nil {
+ return nil
+ }
+ out := new(RecreateDeploymentStrategyParams)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) {
+ *out = *in
+ if in.UpdatePeriodSeconds != nil {
+ in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.IntervalSeconds != nil {
+ in, out := &in.IntervalSeconds, &out.IntervalSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TimeoutSeconds != nil {
+ in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxSurge != nil {
+ in, out := &in.MaxSurge, &out.MaxSurge
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.Pre != nil {
+ in, out := &in.Pre, &out.Pre
+ *out = new(LifecycleHook)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Post != nil {
+ in, out := &in.Post, &out.Post
+ *out = new(LifecycleHook)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams.
+func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingDeploymentStrategyParams)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagImageHook) DeepCopyInto(out *TagImageHook) {
+ *out = *in
+ out.To = in.To
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook.
+func (in *TagImageHook) DeepCopy() *TagImageHook {
+ if in == nil {
+ return nil
+ }
+ out := new(TagImageHook)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..ab137d59be
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,284 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_CustomDeploymentStrategyParams = map[string]string{
+ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
+ "image": "Image specifies a container image which can carry out a deployment.",
+ "environment": "Environment holds the environment which will be given to the container for Image.",
+ "command": "Command is optional and overrides CMD in the container Image.",
+}
+
+func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
+ return map_CustomDeploymentStrategyParams
+}
+
+var map_DeploymentCause = map[string]string{
+ "": "DeploymentCause captures information about a particular cause of a deployment.",
+ "type": "Type of the trigger that resulted in the creation of a new deployment",
+ "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change",
+}
+
+func (DeploymentCause) SwaggerDoc() map[string]string {
+ return map_DeploymentCause
+}
+
+var map_DeploymentCauseImageTrigger = map[string]string{
+ "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger",
+ "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
+}
+
+func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
+ return map_DeploymentCauseImageTrigger
+}
+
+var map_DeploymentCondition = map[string]string{
+ "": "DeploymentCondition describes the state of a deployment config at a certain point.",
+ "type": "Type of deployment condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+ return map_DeploymentCondition
+}
+
+var map_DeploymentConfig = map[string]string{
+ "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec represents a desired deployment state and how to deploy to it.",
+ "status": "Status represents the current deployment state.",
+}
+
+func (DeploymentConfig) SwaggerDoc() map[string]string {
+ return map_DeploymentConfig
+}
+
+var map_DeploymentConfigList = map[string]string{
+ "": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of deployment configs",
+}
+
+func (DeploymentConfigList) SwaggerDoc() map[string]string {
+ return map_DeploymentConfigList
+}
+
+var map_DeploymentConfigRollback = map[string]string{
+ "": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "name": "Name of the deployment config that will be rolled back.",
+ "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.",
+ "spec": "Spec defines the options to rollback generation.",
+}
+
+func (DeploymentConfigRollback) SwaggerDoc() map[string]string {
+ return map_DeploymentConfigRollback
+}
+
+var map_DeploymentConfigRollbackSpec = map[string]string{
+ "": "DeploymentConfigRollbackSpec represents the options for rollback generation.",
+ "from": "From points to a ReplicationController which is a deployment.",
+ "revision": "Revision to rollback to. If set to 0, rollback to the last revision.",
+ "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.",
+ "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.",
+ "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.",
+ "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.",
+}
+
+func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentConfigRollbackSpec
+}
+
+var map_DeploymentConfigSpec = map[string]string{
+ "": "DeploymentConfigSpec represents the desired state of the deployment.",
+ "strategy": "Strategy describes how a deployment is executed.",
+ "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.",
+ "replicas": "Replicas is the number of desired replicas.",
+ "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)",
+ "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.",
+ "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.",
+ "selector": "Selector is a label query over pods that should match the Replicas count.",
+ "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.",
+}
+
+func (DeploymentConfigSpec) SwaggerDoc() map[string]string {
+ return map_DeploymentConfigSpec
+}
+
+var map_DeploymentConfigStatus = map[string]string{
+ "": "DeploymentConfigStatus represents the current deployment state.",
+ "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.",
+ "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.",
+ "replicas": "Replicas is the total number of pods targeted by this deployment config.",
+ "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.",
+ "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.",
+ "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.",
+ "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger",
+ "conditions": "Conditions represents the latest available observations of a deployment config's current state.",
+ "readyReplicas": "Total number of ready pods targeted by this deployment.",
+}
+
+func (DeploymentConfigStatus) SwaggerDoc() map[string]string {
+ return map_DeploymentConfigStatus
+}
+
+var map_DeploymentDetails = map[string]string{
+ "": "DeploymentDetails captures information about the causes of a deployment.",
+ "message": "Message is the user specified change message, if this deployment was triggered manually by the user",
+ "causes": "Causes are extended data associated with all the causes for creating a new deployment",
+}
+
+func (DeploymentDetails) SwaggerDoc() map[string]string {
+ return map_DeploymentDetails
+}
+
+var map_DeploymentLog = map[string]string{
+ "": "DeploymentLog represents the logs for a deployment\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (DeploymentLog) SwaggerDoc() map[string]string {
+ return map_DeploymentLog
+}
+
+var map_DeploymentLogOptions = map[string]string{
+ "": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
+ "follow": "Follow if true indicates that the build log should be streamed until the build terminates.",
+ "previous": "Return previous deployment logs. Defaults to false.",
+ "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+ "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+ "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+ "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
+ "version": "Version of the deployment for which to view logs.",
+}
+
+func (DeploymentLogOptions) SwaggerDoc() map[string]string {
+ return map_DeploymentLogOptions
+}
+
+var map_DeploymentRequest = map[string]string{
+ "": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "name": "Name of the deployment config for requesting a new deployment.",
+ "latest": "Latest will update the deployment config with the latest state from all triggers.",
+ "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.",
+ "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.",
+}
+
+func (DeploymentRequest) SwaggerDoc() map[string]string {
+ return map_DeploymentRequest
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "DeploymentStrategy describes how to perform a deployment.",
+ "type": "Type is the name of a deployment strategy.",
+ "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.",
+ "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.",
+ "rollingParams": "RollingParams are the input to the Rolling deployment strategy.",
+ "resources": "Resources contains resource requirements to execute the deployment and any hooks.",
+ "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_DeploymentTriggerImageChangeParams = map[string]string{
+ "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.",
+ "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.",
+ "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.",
+ "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
+ "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.",
+}
+
+func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
+ return map_DeploymentTriggerImageChangeParams
+}
+
+var map_DeploymentTriggerPolicy = map[string]string{
+ "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.",
+ "type": "Type of the trigger",
+ "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.",
+}
+
+func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
+ return map_DeploymentTriggerPolicy
+}
+
+var map_ExecNewPodHook = map[string]string{
+ "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
+ "command": "Command is the action command and its arguments.",
+ "env": "Env is a set of environment variables to supply to the hook pod's container.",
+ "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
+ "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
+}
+
+func (ExecNewPodHook) SwaggerDoc() map[string]string {
+ return map_ExecNewPodHook
+}
+
+var map_LifecycleHook = map[string]string{
+ "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.",
+ "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.",
+ "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.",
+ "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.",
+}
+
+func (LifecycleHook) SwaggerDoc() map[string]string {
+ return map_LifecycleHook
+}
+
+var map_RecreateDeploymentStrategyParams = map[string]string{
+ "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.",
+ "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
+ "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.",
+ "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.",
+ "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
+}
+
+func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
+ return map_RecreateDeploymentStrategyParams
+}
+
+var map_RollingDeploymentStrategyParams = map[string]string{
+ "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
+ "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
+ "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
+ "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
+ "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.",
+ "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.",
+ "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.",
+ "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
+}
+
+func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string {
+ return map_RollingDeploymentStrategyParams
+}
+
+var map_TagImageHook = map[string]string{
+ "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.",
+ "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.",
+ "to": "To is the target ImageStreamTag to set the container's image onto.",
+}
+
+func (TagImageHook) SwaggerDoc() map[string]string {
+ return map_TagImageHook
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go
new file mode 100644
index 0000000000..b3e4de5010
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go
@@ -0,0 +1,114 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentConfig) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentConfig) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentConfig) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentConfigList) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentConfigList) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentConfigList) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentConfigRollback) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentConfigRollback) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentConfigRollback) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentLog) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentLog) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentLog) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentLogOptions) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentLogOptions) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentLogOptions) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *DeploymentRequest) APILifecycleIntroduced() (major, minor int) {
+ return 3, 0
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *DeploymentRequest) APILifecycleDeprecated() (major, minor int) {
+ return 4, 14
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *DeploymentRequest) APILifecycleRemoved() (major, minor int) {
+ return 4, 10000
+}
diff --git a/vendor/github.com/openshift/api/authorization/install.go b/vendor/github.com/openshift/api/authorization/install.go
new file mode 100644
index 0000000000..08ecc95f49
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/install.go
@@ -0,0 +1,26 @@
+package authorization
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ authorizationv1 "github.com/openshift/api/authorization/v1"
+)
+
+const (
+ GroupName = "authorization.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(authorizationv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/Makefile b/vendor/github.com/openshift/api/authorization/v1/Makefile
new file mode 100644
index 0000000000..1e47c9fd97
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="authorization.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/authorization/v1/codec.go b/vendor/github.com/openshift/api/authorization/v1/codec.go
new file mode 100644
index 0000000000..61f1f9f514
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/codec.go
@@ -0,0 +1,139 @@
+package v1
+
+import (
+ "github.com/openshift/api/pkg/serialization"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+var _ runtime.NestedObjectDecoder = &PolicyRule{}
+var _ runtime.NestedObjectEncoder = &PolicyRule{}
+
+func (c *PolicyRule) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ serialization.DecodeNestedRawExtensionOrUnknown(d, &c.AttributeRestrictions)
+ return nil
+}
+func (c *PolicyRule) EncodeNestedObjects(e runtime.Encoder) error {
+ return serialization.EncodeNestedRawExtension(e, &c.AttributeRestrictions)
+}
+
+var _ runtime.NestedObjectDecoder = &SelfSubjectRulesReview{}
+var _ runtime.NestedObjectEncoder = &SelfSubjectRulesReview{}
+
+func (c *SelfSubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Status.Rules {
+ c.Status.Rules[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *SelfSubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Status.Rules {
+ if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _ runtime.NestedObjectDecoder = &SubjectRulesReview{}
+var _ runtime.NestedObjectEncoder = &SubjectRulesReview{}
+
+func (c *SubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Status.Rules {
+ c.Status.Rules[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *SubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Status.Rules {
+ if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _ runtime.NestedObjectDecoder = &ClusterRole{}
+var _ runtime.NestedObjectEncoder = &ClusterRole{}
+
+func (c *ClusterRole) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Rules {
+ c.Rules[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *ClusterRole) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Rules {
+ if err := c.Rules[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _ runtime.NestedObjectDecoder = &Role{}
+var _ runtime.NestedObjectEncoder = &Role{}
+
+func (c *Role) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Rules {
+ c.Rules[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *Role) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Rules {
+ if err := c.Rules[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _ runtime.NestedObjectDecoder = &ClusterRoleList{}
+var _ runtime.NestedObjectEncoder = &ClusterRoleList{}
+
+func (c *ClusterRoleList) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Items {
+ c.Items[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *ClusterRoleList) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Items {
+ if err := c.Items[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var _ runtime.NestedObjectDecoder = &RoleList{}
+var _ runtime.NestedObjectEncoder = &RoleList{}
+
+func (c *RoleList) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for i := range c.Items {
+ c.Items[i].DecodeNestedObjects(d)
+ }
+ return nil
+}
+func (c *RoleList) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Items {
+ if err := c.Items[i].EncodeNestedObjects(e); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/doc.go b/vendor/github.com/openshift/api/authorization/v1/doc.go
new file mode 100644
index 0000000000..a66741dce6
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/doc.go
@@ -0,0 +1,9 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/authorization/apis/authorization
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=authorization.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go
new file mode 100644
index 0000000000..4a38ab6f76
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go
@@ -0,0 +1,8812 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/authorization/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ v12 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/rbac/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Action) Reset() { *m = Action{} }
+func (*Action) ProtoMessage() {}
+func (*Action) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{0}
+}
+func (m *Action) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Action) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Action.Merge(m, src)
+}
+func (m *Action) XXX_Size() int {
+ return m.Size()
+}
+func (m *Action) XXX_DiscardUnknown() {
+ xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
+
+func (m *ClusterRole) Reset() { *m = ClusterRole{} }
+func (*ClusterRole) ProtoMessage() {}
+func (*ClusterRole) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{1}
+}
+func (m *ClusterRole) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterRole) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterRole.Merge(m, src)
+}
+func (m *ClusterRole) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterRole) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterRole.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterRole proto.InternalMessageInfo
+
+func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
+func (*ClusterRoleBinding) ProtoMessage() {}
+func (*ClusterRoleBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{2}
+}
+func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterRoleBinding.Merge(m, src)
+}
+func (m *ClusterRoleBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterRoleBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo
+
+func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
+func (*ClusterRoleBindingList) ProtoMessage() {}
+func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{3}
+}
+func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterRoleBindingList.Merge(m, src)
+}
+func (m *ClusterRoleBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterRoleBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo
+
+func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
+func (*ClusterRoleList) ProtoMessage() {}
+func (*ClusterRoleList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{4}
+}
+func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterRoleList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterRoleList.Merge(m, src)
+}
+func (m *ClusterRoleList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterRoleList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterRoleList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo
+
+func (m *GroupRestriction) Reset() { *m = GroupRestriction{} }
+func (*GroupRestriction) ProtoMessage() {}
+func (*GroupRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{5}
+}
+func (m *GroupRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GroupRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GroupRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupRestriction.Merge(m, src)
+}
+func (m *GroupRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *GroupRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupRestriction proto.InternalMessageInfo
+
+func (m *IsPersonalSubjectAccessReview) Reset() { *m = IsPersonalSubjectAccessReview{} }
+func (*IsPersonalSubjectAccessReview) ProtoMessage() {}
+func (*IsPersonalSubjectAccessReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{6}
+}
+func (m *IsPersonalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IsPersonalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IsPersonalSubjectAccessReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IsPersonalSubjectAccessReview.Merge(m, src)
+}
+func (m *IsPersonalSubjectAccessReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *IsPersonalSubjectAccessReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_IsPersonalSubjectAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IsPersonalSubjectAccessReview proto.InternalMessageInfo
+
+func (m *LocalResourceAccessReview) Reset() { *m = LocalResourceAccessReview{} }
+func (*LocalResourceAccessReview) ProtoMessage() {}
+func (*LocalResourceAccessReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{7}
+}
+func (m *LocalResourceAccessReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LocalResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LocalResourceAccessReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LocalResourceAccessReview.Merge(m, src)
+}
+func (m *LocalResourceAccessReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *LocalResourceAccessReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_LocalResourceAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalResourceAccessReview proto.InternalMessageInfo
+
+func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
+func (*LocalSubjectAccessReview) ProtoMessage() {}
+func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{8}
+}
+func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src)
+}
+func (m *LocalSubjectAccessReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
+
+func (m *NamedClusterRole) Reset() { *m = NamedClusterRole{} }
+func (*NamedClusterRole) ProtoMessage() {}
+func (*NamedClusterRole) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{9}
+}
+func (m *NamedClusterRole) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedClusterRole) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedClusterRole.Merge(m, src)
+}
+func (m *NamedClusterRole) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedClusterRole) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedClusterRole.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedClusterRole proto.InternalMessageInfo
+
+func (m *NamedClusterRoleBinding) Reset() { *m = NamedClusterRoleBinding{} }
+func (*NamedClusterRoleBinding) ProtoMessage() {}
+func (*NamedClusterRoleBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{10}
+}
+func (m *NamedClusterRoleBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedClusterRoleBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedClusterRoleBinding.Merge(m, src)
+}
+func (m *NamedClusterRoleBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedClusterRoleBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedClusterRoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedClusterRoleBinding proto.InternalMessageInfo
+
+func (m *NamedRole) Reset() { *m = NamedRole{} }
+func (*NamedRole) ProtoMessage() {}
+func (*NamedRole) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{11}
+}
+func (m *NamedRole) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedRole) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedRole.Merge(m, src)
+}
+func (m *NamedRole) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedRole) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedRole.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedRole proto.InternalMessageInfo
+
+func (m *NamedRoleBinding) Reset() { *m = NamedRoleBinding{} }
+func (*NamedRoleBinding) ProtoMessage() {}
+func (*NamedRoleBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{12}
+}
+func (m *NamedRoleBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedRoleBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedRoleBinding.Merge(m, src)
+}
+func (m *NamedRoleBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedRoleBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedRoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedRoleBinding proto.InternalMessageInfo
+
+func (m *OptionalNames) Reset() { *m = OptionalNames{} }
+func (*OptionalNames) ProtoMessage() {}
+func (*OptionalNames) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{13}
+}
+func (m *OptionalNames) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OptionalNames) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OptionalNames.Merge(m, src)
+}
+func (m *OptionalNames) XXX_Size() int {
+ return m.Size()
+}
+func (m *OptionalNames) XXX_DiscardUnknown() {
+ xxx_messageInfo_OptionalNames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalNames proto.InternalMessageInfo
+
+func (m *OptionalScopes) Reset() { *m = OptionalScopes{} }
+func (*OptionalScopes) ProtoMessage() {}
+func (*OptionalScopes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{14}
+}
+func (m *OptionalScopes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OptionalScopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OptionalScopes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OptionalScopes.Merge(m, src)
+}
+func (m *OptionalScopes) XXX_Size() int {
+ return m.Size()
+}
+func (m *OptionalScopes) XXX_DiscardUnknown() {
+ xxx_messageInfo_OptionalScopes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalScopes proto.InternalMessageInfo
+
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
+func (*PolicyRule) ProtoMessage() {}
+func (*PolicyRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{15}
+}
+func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PolicyRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PolicyRule.Merge(m, src)
+}
+func (m *PolicyRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *PolicyRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_PolicyRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+
+func (m *ResourceAccessReview) Reset() { *m = ResourceAccessReview{} }
+func (*ResourceAccessReview) ProtoMessage() {}
+func (*ResourceAccessReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{16}
+}
+func (m *ResourceAccessReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceAccessReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceAccessReview.Merge(m, src)
+}
+func (m *ResourceAccessReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceAccessReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceAccessReview proto.InternalMessageInfo
+
+func (m *ResourceAccessReviewResponse) Reset() { *m = ResourceAccessReviewResponse{} }
+func (*ResourceAccessReviewResponse) ProtoMessage() {}
+func (*ResourceAccessReviewResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{17}
+}
+func (m *ResourceAccessReviewResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceAccessReviewResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceAccessReviewResponse.Merge(m, src)
+}
+func (m *ResourceAccessReviewResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceAccessReviewResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceAccessReviewResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceAccessReviewResponse proto.InternalMessageInfo
+
+func (m *Role) Reset() { *m = Role{} }
+func (*Role) ProtoMessage() {}
+func (*Role) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{18}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+ return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+ xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
+func (*RoleBinding) ProtoMessage() {}
+func (*RoleBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{19}
+}
+func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleBinding.Merge(m, src)
+}
+func (m *RoleBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
+
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
+func (*RoleBindingList) ProtoMessage() {}
+func (*RoleBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{20}
+}
+func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleBindingList.Merge(m, src)
+}
+func (m *RoleBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
+
+func (m *RoleBindingRestriction) Reset() { *m = RoleBindingRestriction{} }
+func (*RoleBindingRestriction) ProtoMessage() {}
+func (*RoleBindingRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{21}
+}
+func (m *RoleBindingRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleBindingRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleBindingRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleBindingRestriction.Merge(m, src)
+}
+func (m *RoleBindingRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleBindingRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleBindingRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestriction proto.InternalMessageInfo
+
+func (m *RoleBindingRestrictionList) Reset() { *m = RoleBindingRestrictionList{} }
+func (*RoleBindingRestrictionList) ProtoMessage() {}
+func (*RoleBindingRestrictionList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{22}
+}
+func (m *RoleBindingRestrictionList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleBindingRestrictionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleBindingRestrictionList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleBindingRestrictionList.Merge(m, src)
+}
+func (m *RoleBindingRestrictionList) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleBindingRestrictionList) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleBindingRestrictionList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestrictionList proto.InternalMessageInfo
+
+func (m *RoleBindingRestrictionSpec) Reset() { *m = RoleBindingRestrictionSpec{} }
+func (*RoleBindingRestrictionSpec) ProtoMessage() {}
+func (*RoleBindingRestrictionSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{23}
+}
+func (m *RoleBindingRestrictionSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleBindingRestrictionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleBindingRestrictionSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleBindingRestrictionSpec.Merge(m, src)
+}
+func (m *RoleBindingRestrictionSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleBindingRestrictionSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleBindingRestrictionSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleBindingRestrictionSpec proto.InternalMessageInfo
+
+func (m *RoleList) Reset() { *m = RoleList{} }
+func (*RoleList) ProtoMessage() {}
+func (*RoleList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{24}
+}
+func (m *RoleList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RoleList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RoleList.Merge(m, src)
+}
+func (m *RoleList) XXX_Size() int {
+ return m.Size()
+}
+func (m *RoleList) XXX_DiscardUnknown() {
+ xxx_messageInfo_RoleList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RoleList proto.InternalMessageInfo
+
+func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
+func (*SelfSubjectRulesReview) ProtoMessage() {}
+func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{25}
+}
+func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src)
+}
+func (m *SelfSubjectRulesReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
+
+func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
+func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
+func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{26}
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src)
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
+
+func (m *ServiceAccountReference) Reset() { *m = ServiceAccountReference{} }
+func (*ServiceAccountReference) ProtoMessage() {}
+func (*ServiceAccountReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{27}
+}
+func (m *ServiceAccountReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceAccountReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceAccountReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceAccountReference.Merge(m, src)
+}
+func (m *ServiceAccountReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceAccountReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceAccountReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountReference proto.InternalMessageInfo
+
+func (m *ServiceAccountRestriction) Reset() { *m = ServiceAccountRestriction{} }
+func (*ServiceAccountRestriction) ProtoMessage() {}
+func (*ServiceAccountRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{28}
+}
+func (m *ServiceAccountRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceAccountRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceAccountRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceAccountRestriction.Merge(m, src)
+}
+func (m *ServiceAccountRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceAccountRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceAccountRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountRestriction proto.InternalMessageInfo
+
+func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
+func (*SubjectAccessReview) ProtoMessage() {}
+func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{29}
+}
+func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SubjectAccessReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SubjectAccessReview.Merge(m, src)
+}
+func (m *SubjectAccessReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *SubjectAccessReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
+
+func (m *SubjectAccessReviewResponse) Reset() { *m = SubjectAccessReviewResponse{} }
+func (*SubjectAccessReviewResponse) ProtoMessage() {}
+func (*SubjectAccessReviewResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{30}
+}
+func (m *SubjectAccessReviewResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SubjectAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SubjectAccessReviewResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SubjectAccessReviewResponse.Merge(m, src)
+}
+func (m *SubjectAccessReviewResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *SubjectAccessReviewResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SubjectAccessReviewResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectAccessReviewResponse proto.InternalMessageInfo
+
+func (m *SubjectRulesReview) Reset() { *m = SubjectRulesReview{} }
+func (*SubjectRulesReview) ProtoMessage() {}
+func (*SubjectRulesReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{31}
+}
+func (m *SubjectRulesReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SubjectRulesReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SubjectRulesReview.Merge(m, src)
+}
+func (m *SubjectRulesReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *SubjectRulesReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_SubjectRulesReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReview proto.InternalMessageInfo
+
+func (m *SubjectRulesReviewSpec) Reset() { *m = SubjectRulesReviewSpec{} }
+func (*SubjectRulesReviewSpec) ProtoMessage() {}
+func (*SubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{32}
+}
+func (m *SubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SubjectRulesReviewSpec.Merge(m, src)
+}
+func (m *SubjectRulesReviewSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *SubjectRulesReviewSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_SubjectRulesReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReviewSpec proto.InternalMessageInfo
+
+func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
+func (*SubjectRulesReviewStatus) ProtoMessage() {}
+func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{33}
+}
+func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src)
+}
+func (m *SubjectRulesReviewStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
+
+func (m *UserRestriction) Reset() { *m = UserRestriction{} }
+func (*UserRestriction) ProtoMessage() {}
+func (*UserRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39b89822f939ca46, []int{34}
+}
+func (m *UserRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UserRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserRestriction.Merge(m, src)
+}
+func (m *UserRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserRestriction proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Action)(nil), "github.com.openshift.api.authorization.v1.Action")
+ proto.RegisterType((*ClusterRole)(nil), "github.com.openshift.api.authorization.v1.ClusterRole")
+ proto.RegisterType((*ClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBinding")
+ proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBindingList")
+ proto.RegisterType((*ClusterRoleList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleList")
+ proto.RegisterType((*GroupRestriction)(nil), "github.com.openshift.api.authorization.v1.GroupRestriction")
+ proto.RegisterType((*IsPersonalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.IsPersonalSubjectAccessReview")
+ proto.RegisterType((*LocalResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalResourceAccessReview")
+ proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalSubjectAccessReview")
+ proto.RegisterType((*NamedClusterRole)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRole")
+ proto.RegisterType((*NamedClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRoleBinding")
+ proto.RegisterType((*NamedRole)(nil), "github.com.openshift.api.authorization.v1.NamedRole")
+ proto.RegisterType((*NamedRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedRoleBinding")
+ proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.authorization.v1.OptionalNames")
+ proto.RegisterType((*OptionalScopes)(nil), "github.com.openshift.api.authorization.v1.OptionalScopes")
+ proto.RegisterType((*PolicyRule)(nil), "github.com.openshift.api.authorization.v1.PolicyRule")
+ proto.RegisterType((*ResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReview")
+ proto.RegisterType((*ResourceAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReviewResponse")
+ proto.RegisterType((*Role)(nil), "github.com.openshift.api.authorization.v1.Role")
+ proto.RegisterType((*RoleBinding)(nil), "github.com.openshift.api.authorization.v1.RoleBinding")
+ proto.RegisterType((*RoleBindingList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingList")
+ proto.RegisterType((*RoleBindingRestriction)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestriction")
+ proto.RegisterType((*RoleBindingRestrictionList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionList")
+ proto.RegisterType((*RoleBindingRestrictionSpec)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionSpec")
+ proto.RegisterType((*RoleList)(nil), "github.com.openshift.api.authorization.v1.RoleList")
+ proto.RegisterType((*SelfSubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReview")
+ proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReviewSpec")
+ proto.RegisterType((*ServiceAccountReference)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountReference")
+ proto.RegisterType((*ServiceAccountRestriction)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountRestriction")
+ proto.RegisterType((*SubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReview")
+ proto.RegisterType((*SubjectAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReviewResponse")
+ proto.RegisterType((*SubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReview")
+ proto.RegisterType((*SubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewSpec")
+ proto.RegisterType((*SubjectRulesReviewStatus)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewStatus")
+ proto.RegisterType((*UserRestriction)(nil), "github.com.openshift.api.authorization.v1.UserRestriction")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/authorization/v1/generated.proto", fileDescriptor_39b89822f939ca46)
+}
+
+var fileDescriptor_39b89822f939ca46 = []byte{
+ // 1821 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0xcd, 0x6f, 0x1b, 0x59,
+ 0x3d, 0xcf, 0x76, 0x1c, 0xfb, 0xe7, 0x26, 0xce, 0xbe, 0x66, 0xdb, 0x69, 0xa0, 0xb6, 0x35, 0x20,
+ 0x48, 0x05, 0x3b, 0x26, 0x01, 0x4a, 0xdb, 0x15, 0x5a, 0xd9, 0xdd, 0xa8, 0x8a, 0x54, 0x9a, 0xec,
+ 0x0b, 0xbb, 0x5a, 0x2d, 0x1f, 0x62, 0x3c, 0x79, 0xb1, 0x87, 0x8c, 0x67, 0xac, 0x79, 0xe3, 0x94,
+ 0x82, 0x90, 0x0a, 0x12, 0x07, 0x2e, 0x68, 0x2f, 0x20, 0x8e, 0x20, 0xfe, 0x00, 0xc4, 0x05, 0x09,
+ 0x24, 0x38, 0x71, 0xe8, 0x81, 0x43, 0x25, 0x2e, 0x15, 0x42, 0x86, 0xba, 0x88, 0x03, 0x07, 0xfe,
+ 0x06, 0xf4, 0xde, 0xbc, 0xf1, 0x7c, 0x78, 0xac, 0x78, 0x92, 0x26, 0x82, 0x55, 0x6f, 0x9e, 0xf7,
+ 0xfb, 0xfe, 0x7c, 0xbf, 0xdf, 0x33, 0xdc, 0xee, 0x9a, 0x5e, 0x6f, 0xd8, 0xd1, 0x0c, 0xa7, 0xdf,
+ 0x74, 0x06, 0xd4, 0x66, 0x3d, 0xf3, 0xd0, 0x6b, 0xea, 0x03, 0xb3, 0xa9, 0x0f, 0xbd, 0x9e, 0xe3,
+ 0x9a, 0xdf, 0xd5, 0x3d, 0xd3, 0xb1, 0x9b, 0xc7, 0x9b, 0xcd, 0x2e, 0xb5, 0xa9, 0xab, 0x7b, 0xf4,
+ 0x40, 0x1b, 0xb8, 0x8e, 0xe7, 0xe0, 0x1b, 0x21, 0xa9, 0x36, 0x21, 0xd5, 0xf4, 0x81, 0xa9, 0xc5,
+ 0x48, 0xb5, 0xe3, 0xcd, 0xf5, 0x37, 0x22, 0x52, 0xba, 0x4e, 0xd7, 0x69, 0x0a, 0x0e, 0x9d, 0xe1,
+ 0xa1, 0xf8, 0x12, 0x1f, 0xe2, 0x97, 0xcf, 0x79, 0x5d, 0x3d, 0xba, 0xc5, 0x34, 0xd3, 0x11, 0x6a,
+ 0x18, 0x8e, 0x4b, 0x53, 0xa4, 0xc7, 0x70, 0xdc, 0x8e, 0x6e, 0xa4, 0xe1, 0x7c, 0x21, 0xc4, 0xe9,
+ 0xeb, 0x46, 0xcf, 0xb4, 0xa9, 0xfb, 0xa8, 0x39, 0x38, 0xea, 0xf2, 0x03, 0xd6, 0xec, 0x53, 0x4f,
+ 0x4f, 0xa3, 0x6a, 0xce, 0xa2, 0x72, 0x87, 0xb6, 0x67, 0xf6, 0xe9, 0x14, 0xc1, 0xcd, 0x93, 0x08,
+ 0x98, 0xd1, 0xa3, 0x7d, 0x3d, 0x49, 0xa7, 0xfe, 0xa0, 0x00, 0xc5, 0x96, 0xc1, 0x7d, 0x84, 0x9b,
+ 0x50, 0xb6, 0xf5, 0x3e, 0x65, 0x03, 0xdd, 0xa0, 0x0a, 0x6a, 0xa0, 0x8d, 0x72, 0xfb, 0xb5, 0x27,
+ 0xa3, 0xfa, 0xc2, 0x78, 0x54, 0x2f, 0x3f, 0x08, 0x00, 0x24, 0xc4, 0xc1, 0x0d, 0x28, 0x1c, 0x53,
+ 0xb7, 0xa3, 0xe4, 0x04, 0xee, 0x25, 0x89, 0x5b, 0x78, 0x8f, 0xba, 0x1d, 0x22, 0x20, 0xf8, 0x36,
+ 0xac, 0xba, 0x94, 0x39, 0x43, 0xd7, 0xa0, 0xad, 0xbd, 0x9d, 0x7b, 0xae, 0x33, 0x1c, 0x28, 0x79,
+ 0x81, 0xbd, 0x2c, 0xb1, 0x17, 0xc5, 0x21, 0x99, 0x42, 0xc3, 0x6f, 0x01, 0x8e, 0x9c, 0xbd, 0x47,
+ 0x5d, 0x66, 0x3a, 0xb6, 0x52, 0x10, 0xc4, 0x55, 0x49, 0xbc, 0x24, 0x8f, 0x49, 0x0a, 0x2a, 0xfe,
+ 0x2c, 0x94, 0x82, 0x53, 0x65, 0x51, 0x90, 0xad, 0x4a, 0xb2, 0x12, 0x91, 0xe7, 0x64, 0x82, 0x81,
+ 0x6f, 0xc1, 0xa5, 0xe0, 0x37, 0xb7, 0x55, 0x29, 0x0a, 0x8a, 0x35, 0x49, 0x71, 0x89, 0x44, 0x60,
+ 0x24, 0x86, 0xc9, 0xbd, 0x30, 0xd0, 0xbd, 0x9e, 0x52, 0x8a, 0x7b, 0x61, 0x4f, 0xf7, 0x7a, 0x44,
+ 0x40, 0xf0, 0xdb, 0xb0, 0x6a, 0xb2, 0x07, 0x8e, 0x1d, 0x30, 0x79, 0x97, 0xdc, 0x57, 0xca, 0x0d,
+ 0xb4, 0x51, 0x6a, 0x2b, 0x12, 0x7b, 0x75, 0x27, 0x01, 0x27, 0x53, 0x14, 0xf8, 0x7d, 0x58, 0x32,
+ 0x1c, 0xdb, 0xa3, 0xb6, 0xa7, 0x2c, 0x35, 0xd0, 0x46, 0x65, 0xeb, 0x0d, 0xcd, 0x8f, 0xb9, 0x16,
+ 0x8d, 0xb9, 0x36, 0x38, 0xea, 0x6a, 0x32, 0xe6, 0x1a, 0xd1, 0x1f, 0x6e, 0x7f, 0xc7, 0xa3, 0x36,
+ 0xf7, 0x47, 0xe8, 0xb4, 0xbb, 0x3e, 0x17, 0x12, 0xb0, 0x53, 0x7f, 0x9d, 0x83, 0xca, 0x5d, 0x6b,
+ 0xc8, 0x3c, 0xea, 0x12, 0xc7, 0xa2, 0xf8, 0x5b, 0x50, 0xe2, 0x79, 0x79, 0xa0, 0x7b, 0xba, 0xc8,
+ 0x83, 0xca, 0xd6, 0xe7, 0x66, 0x8a, 0xe2, 0x59, 0xac, 0x71, 0x6c, 0xed, 0x78, 0x53, 0xdb, 0xed,
+ 0x7c, 0x9b, 0x1a, 0xde, 0x57, 0xa8, 0xa7, 0xb7, 0xb1, 0x94, 0x06, 0xe1, 0x19, 0x99, 0x70, 0xc5,
+ 0x1f, 0xc0, 0xa2, 0x3b, 0xb4, 0x28, 0x53, 0x72, 0x8d, 0xfc, 0x46, 0x65, 0xeb, 0x8b, 0xda, 0xdc,
+ 0x65, 0xac, 0xed, 0x39, 0x96, 0x69, 0x3c, 0x22, 0x43, 0x8b, 0x86, 0x39, 0xc4, 0xbf, 0x18, 0xf1,
+ 0x59, 0xe2, 0x0e, 0x54, 0xf5, 0x6e, 0xd7, 0xa5, 0x5d, 0x41, 0xc2, 0x41, 0x22, 0xe5, 0x2a, 0x5b,
+ 0x9f, 0x88, 0x18, 0xa1, 0xf1, 0x72, 0xe5, 0xec, 0x5a, 0x71, 0xd4, 0xf6, 0xe5, 0xf1, 0xa8, 0x5e,
+ 0x4d, 0x1c, 0x92, 0x24, 0x43, 0xf5, 0xdf, 0x79, 0xc0, 0x11, 0x8f, 0xb5, 0x4d, 0xfb, 0xc0, 0xb4,
+ 0xbb, 0x17, 0xe0, 0x38, 0x0a, 0xe5, 0x21, 0xa3, 0xae, 0x28, 0x47, 0x51, 0x77, 0x95, 0xad, 0x5b,
+ 0x19, 0x9c, 0xb7, 0x3b, 0xe0, 0xbf, 0x74, 0x4b, 0xd0, 0xb7, 0x97, 0x79, 0x65, 0xbf, 0x1b, 0xb0,
+ 0x23, 0x21, 0x67, 0xdc, 0x03, 0xe8, 0xf2, 0x2a, 0xf4, 0xe5, 0xe4, 0xcf, 0x28, 0x67, 0x85, 0x9b,
+ 0x73, 0x6f, 0xc2, 0x8f, 0x44, 0x78, 0xe3, 0x77, 0xa0, 0xc4, 0x86, 0xc2, 0x52, 0xa6, 0x14, 0x44,
+ 0x32, 0xc4, 0xc2, 0xc4, 0x3b, 0x6f, 0xe8, 0x20, 0x42, 0x0f, 0xa9, 0x4b, 0x6d, 0x83, 0x86, 0xa5,
+ 0xbc, 0x2f, 0x89, 0xc9, 0x84, 0x0d, 0x7e, 0x00, 0x4b, 0xae, 0x63, 0x51, 0x42, 0x0f, 0x45, 0xdd,
+ 0xcf, 0xc9, 0x71, 0x52, 0x1e, 0xc4, 0xa7, 0x25, 0x01, 0x13, 0xf5, 0xaf, 0x08, 0xae, 0x4c, 0x07,
+ 0xfb, 0xbe, 0xc9, 0x3c, 0xfc, 0xf5, 0xa9, 0x80, 0x6b, 0xf3, 0x05, 0x9c, 0x53, 0x8b, 0x70, 0x4f,
+ 0x0c, 0x09, 0x4e, 0x22, 0xc1, 0xee, 0xc0, 0xa2, 0xe9, 0xd1, 0x7e, 0x50, 0x25, 0x5f, 0xce, 0x10,
+ 0x80, 0x69, 0x7d, 0xc3, 0x6a, 0xd9, 0xe1, 0x3c, 0x89, 0xcf, 0x5a, 0xfd, 0x33, 0x82, 0x6a, 0x04,
+ 0xf9, 0x02, 0xac, 0xfa, 0x5a, 0xdc, 0xaa, 0x9b, 0xa7, 0xb4, 0x2a, 0xdd, 0x9c, 0x9f, 0x21, 0x58,
+ 0xf5, 0x6f, 0x14, 0xca, 0x3c, 0xd7, 0xf4, 0x2f, 0x36, 0x15, 0x8a, 0x22, 0xe3, 0x98, 0x82, 0x1a,
+ 0xf9, 0x8d, 0x72, 0x1b, 0xc6, 0xa3, 0x7a, 0x51, 0x60, 0x31, 0x22, 0x21, 0xf8, 0x9b, 0x50, 0xb4,
+ 0xf4, 0x0e, 0xb5, 0x02, 0xb5, 0x3e, 0x3f, 0xa7, 0xc5, 0x9c, 0x66, 0x9f, 0x5a, 0xd4, 0xf0, 0x1c,
+ 0x37, 0xbc, 0x2e, 0x83, 0x13, 0x46, 0x24, 0x57, 0xb5, 0x0e, 0xd7, 0x77, 0xd8, 0x1e, 0x75, 0x19,
+ 0x2f, 0x0b, 0x99, 0xb4, 0x2d, 0xc3, 0xa0, 0x8c, 0x11, 0x7a, 0x6c, 0xd2, 0x87, 0xaa, 0x05, 0xd7,
+ 0xee, 0x3b, 0x86, 0x6e, 0x05, 0x2d, 0x3f, 0x0a, 0xc4, 0xbb, 0xc1, 0x25, 0x2d, 0xe3, 0xb1, 0x99,
+ 0xc1, 0x69, 0x3e, 0x61, 0xbb, 0xc0, 0x75, 0x23, 0x92, 0x8d, 0xfa, 0xd3, 0x1c, 0x28, 0x42, 0x5c,
+ 0x8a, 0x2a, 0x2f, 0x5d, 0x1a, 0xbf, 0x22, 0x79, 0x6f, 0x49, 0x0e, 0x0a, 0xbc, 0xf5, 0x10, 0x01,
+ 0xc1, 0x9f, 0x9e, 0x84, 0x28, 0x2f, 0x42, 0x54, 0x1d, 0x8f, 0xea, 0x15, 0x3f, 0x44, 0xfb, 0x96,
+ 0x69, 0xd0, 0x49, 0x9c, 0xbe, 0x01, 0x45, 0x66, 0x38, 0x03, 0xca, 0xc4, 0x28, 0x50, 0xd9, 0xba,
+ 0x7d, 0x8a, 0xae, 0xb4, 0x2f, 0x18, 0xf8, 0x69, 0xe0, 0xff, 0x26, 0x92, 0xa9, 0xfa, 0x13, 0x04,
+ 0xab, 0xbc, 0x31, 0x1d, 0x44, 0xef, 0xc3, 0x06, 0x14, 0xf8, 0xd0, 0x23, 0x67, 0xa2, 0x89, 0xfa,
+ 0x62, 0x16, 0x10, 0x10, 0xfc, 0x3e, 0x14, 0x78, 0xb7, 0x90, 0x1d, 0xf9, 0xb4, 0x29, 0x3d, 0xe1,
+ 0x2c, 0x5a, 0x90, 0xe0, 0xa8, 0xfe, 0x06, 0xc1, 0xd5, 0xa4, 0x42, 0xc1, 0x75, 0x73, 0xb2, 0x5e,
+ 0x1e, 0x54, 0xdc, 0x90, 0x40, 0xaa, 0x77, 0xc6, 0x3e, 0x72, 0x59, 0xca, 0xa9, 0x44, 0x0e, 0x49,
+ 0x54, 0x8c, 0xfa, 0x18, 0x81, 0x18, 0x18, 0x0f, 0xe6, 0xf4, 0xde, 0x3b, 0x31, 0xef, 0x35, 0x33,
+ 0xa8, 0x37, 0xd3, 0x6d, 0xbf, 0x0a, 0xe2, 0x98, 0xcd, 0x5f, 0xfd, 0x34, 0x7f, 0xdd, 0xcc, 0xaa,
+ 0xd0, 0xdc, 0x8e, 0xba, 0x03, 0xcb, 0xb1, 0x9b, 0x12, 0xd7, 0x83, 0xde, 0xe8, 0x37, 0xaa, 0x72,
+ 0xb2, 0xbf, 0xdd, 0x29, 0xfd, 0xfc, 0x17, 0xf5, 0x85, 0xc7, 0x7f, 0x6b, 0x2c, 0xa8, 0x6f, 0xc2,
+ 0x4a, 0x3c, 0x9f, 0xb3, 0x10, 0xff, 0x38, 0x0f, 0x10, 0x0e, 0x52, 0x9c, 0x92, 0x8f, 0xeb, 0x31,
+ 0x4a, 0x3e, 0xc5, 0x33, 0xe2, 0x9f, 0xe3, 0x1f, 0x22, 0x78, 0x5d, 0xf7, 0x3c, 0xd7, 0xec, 0x0c,
+ 0x3d, 0x1a, 0x69, 0xad, 0xc1, 0x0c, 0x92, 0x71, 0x14, 0xbd, 0x2e, 0x3d, 0xf3, 0x7a, 0x2b, 0x8d,
+ 0x27, 0x49, 0x17, 0x85, 0x3f, 0x03, 0x65, 0x7d, 0x60, 0xde, 0x8b, 0xb6, 0x09, 0x31, 0xc1, 0x04,
+ 0x2b, 0x03, 0x23, 0x21, 0x9c, 0x23, 0x07, 0x53, 0xba, 0x3f, 0x58, 0x48, 0xe4, 0xa0, 0xbd, 0x32,
+ 0x12, 0xc2, 0xf1, 0x97, 0x60, 0x39, 0x3a, 0xd2, 0x33, 0x65, 0x51, 0x10, 0xbc, 0x36, 0x1e, 0xd5,
+ 0x97, 0xa3, 0x93, 0x3f, 0x23, 0x71, 0x3c, 0xdc, 0x86, 0xaa, 0x1d, 0x9b, 0xd2, 0x99, 0x52, 0x14,
+ 0xa4, 0xca, 0x78, 0x54, 0x5f, 0x8b, 0x0f, 0xf0, 0xb2, 0x91, 0x25, 0x09, 0xd4, 0x2e, 0xac, 0x5d,
+ 0x4c, 0xcf, 0xff, 0x3b, 0x82, 0x8f, 0xa7, 0x49, 0x22, 0x94, 0x0d, 0x1c, 0x9b, 0xd1, 0xec, 0x0b,
+ 0xe0, 0x27, 0x61, 0x91, 0x77, 0x6f, 0xff, 0xce, 0x2c, 0xfb, 0x73, 0x1e, 0x6f, 0xea, 0xd2, 0x54,
+ 0x1f, 0x38, 0x7f, 0x6f, 0x7f, 0x0b, 0x56, 0xe8, 0xb1, 0x6e, 0x0d, 0xb9, 0xb6, 0xdb, 0xae, 0xeb,
+ 0xb8, 0x72, 0xdd, 0xbb, 0x2a, 0x95, 0xa8, 0x6e, 0x73, 0xa8, 0x3e, 0x01, 0x93, 0x04, 0xba, 0xfa,
+ 0x27, 0x04, 0x85, 0xff, 0xff, 0x0d, 0x46, 0x7d, 0x91, 0x87, 0xca, 0xab, 0xb5, 0xe2, 0xa3, 0xbe,
+ 0x56, 0xf0, 0xc9, 0xfb, 0x62, 0xf7, 0x89, 0x33, 0x4c, 0xde, 0x27, 0x2f, 0x12, 0x2f, 0x10, 0x5c,
+ 0x89, 0x5e, 0x74, 0x91, 0xf9, 0xfb, 0xfc, 0xf3, 0xb7, 0x0b, 0x05, 0x36, 0xa0, 0x86, 0x4c, 0xdd,
+ 0xed, 0xd3, 0x19, 0x16, 0x51, 0x79, 0x7f, 0x40, 0x8d, 0x70, 0x40, 0xe0, 0x5f, 0x44, 0x08, 0x50,
+ 0xc7, 0x08, 0xd6, 0xd3, 0x49, 0x2e, 0x20, 0x7e, 0x87, 0xf1, 0xf8, 0xb5, 0xce, 0x6c, 0xe6, 0x8c,
+ 0x50, 0xfe, 0x3e, 0x3f, 0xcb, 0x48, 0xee, 0x09, 0xfc, 0x08, 0xaa, 0xbc, 0xa4, 0xdd, 0xf0, 0x58,
+ 0xda, 0x7a, 0x27, 0x83, 0x42, 0x62, 0xf6, 0x8f, 0x68, 0x22, 0xde, 0x5d, 0x12, 0x87, 0x24, 0x29,
+ 0x07, 0x7f, 0x1f, 0x56, 0x45, 0x91, 0x47, 0x65, 0xfb, 0x31, 0x7f, 0x33, 0x83, 0xec, 0xe4, 0x82,
+ 0xd8, 0x5e, 0x1b, 0x8f, 0xea, 0x53, 0x6b, 0x23, 0x99, 0x12, 0x85, 0x7f, 0x89, 0xe0, 0x1a, 0xa3,
+ 0xee, 0xb1, 0x69, 0x50, 0xdd, 0x30, 0x9c, 0xa1, 0xed, 0x45, 0x15, 0xf1, 0xfb, 0xd9, 0xdb, 0x19,
+ 0x14, 0xd9, 0xf7, 0x79, 0xb5, 0x7c, 0x5e, 0x51, 0x8d, 0xae, 0x8f, 0x47, 0xf5, 0x6b, 0x33, 0xc1,
+ 0x64, 0xb6, 0x16, 0xea, 0x1f, 0x11, 0x94, 0x2e, 0x68, 0x93, 0xff, 0x6a, 0x3c, 0x1f, 0x33, 0x0f,
+ 0xee, 0xe9, 0xd9, 0xf7, 0x1f, 0x04, 0x57, 0xf6, 0xa9, 0x75, 0x28, 0x5b, 0xb0, 0x7f, 0x33, 0xfa,
+ 0x23, 0x51, 0x50, 0xe6, 0x28, 0x73, 0x99, 0xa7, 0x33, 0x9c, 0x55, 0xe6, 0xf8, 0x08, 0x8a, 0xcc,
+ 0xd3, 0xbd, 0x61, 0x70, 0x19, 0xde, 0xcd, 0x22, 0x6a, 0x5a, 0x8c, 0x60, 0xd5, 0x5e, 0x91, 0x82,
+ 0x8a, 0xfe, 0x37, 0x91, 0x22, 0xd4, 0xef, 0xc1, 0xfa, 0x6c, 0xf5, 0x22, 0x0b, 0x2f, 0x3a, 0x8f,
+ 0x85, 0xd7, 0x82, 0xab, 0xc9, 0x34, 0x93, 0x57, 0xd7, 0x1c, 0xeb, 0x52, 0x6c, 0x60, 0xcc, 0x9d,
+ 0x3c, 0x30, 0xaa, 0x7f, 0x41, 0x30, 0x3b, 0xab, 0xf1, 0x8f, 0x10, 0x54, 0xe3, 0x89, 0xed, 0x6f,
+ 0x24, 0x95, 0xad, 0xf6, 0x19, 0x8a, 0x2a, 0xb8, 0x89, 0x27, 0x53, 0x64, 0x1c, 0x81, 0x91, 0xa4,
+ 0x4c, 0xac, 0x01, 0x4c, 0x54, 0x8e, 0xcd, 0xb6, 0x13, 0x9b, 0x18, 0x89, 0x60, 0xa8, 0x1f, 0xe6,
+ 0xe0, 0xf2, 0xab, 0x77, 0x94, 0x58, 0x5a, 0xfd, 0x13, 0xc1, 0xc7, 0x52, 0x5c, 0x72, 0xfa, 0x55,
+ 0xe3, 0x06, 0x2c, 0xe9, 0x96, 0xe5, 0x3c, 0xa4, 0x07, 0xc2, 0xfa, 0x52, 0x38, 0x58, 0xb5, 0xfc,
+ 0x63, 0x12, 0xc0, 0xf1, 0xa7, 0xa0, 0xe8, 0x52, 0x9d, 0xc9, 0x8e, 0x5c, 0x0e, 0xeb, 0x8e, 0x88,
+ 0x53, 0x22, 0xa1, 0xb8, 0x05, 0x55, 0x1a, 0x5f, 0x28, 0x4e, 0xda, 0x37, 0x92, 0xf8, 0xea, 0xbf,
+ 0x10, 0xe0, 0x94, 0x3e, 0x65, 0xc4, 0xfa, 0x54, 0xeb, 0x6c, 0xcd, 0xe3, 0x7f, 0xa2, 0x47, 0xfd,
+ 0x81, 0x37, 0xe5, 0xf4, 0x06, 0x15, 0x24, 0x25, 0x9a, 0x99, 0x94, 0xe1, 0xfb, 0x6b, 0x6e, 0xe6,
+ 0xfb, 0x6b, 0x98, 0x8f, 0xf9, 0xf3, 0xc8, 0xc7, 0xdf, 0x21, 0x50, 0x66, 0x19, 0x1d, 0xee, 0x72,
+ 0xe8, 0xe5, 0xff, 0x1b, 0x95, 0x92, 0x64, 0xb9, 0x8c, 0x49, 0xf6, 0x5b, 0x04, 0xc9, 0xc9, 0x08,
+ 0xd7, 0x83, 0xcd, 0x3b, 0xf2, 0x62, 0x23, 0x36, 0xef, 0x60, 0xe9, 0x9e, 0xc7, 0xe7, 0xe1, 0x9b,
+ 0x77, 0xfe, 0x3c, 0xde, 0xbc, 0xdb, 0xbb, 0x4f, 0x9e, 0xd7, 0x16, 0x9e, 0x3e, 0xaf, 0x2d, 0x3c,
+ 0x7b, 0x5e, 0x5b, 0x78, 0x3c, 0xae, 0xa1, 0x27, 0xe3, 0x1a, 0x7a, 0x3a, 0xae, 0xa1, 0x67, 0xe3,
+ 0x1a, 0xfa, 0xc7, 0xb8, 0x86, 0x3e, 0x7c, 0x51, 0x5b, 0xf8, 0xe0, 0xc6, 0xdc, 0xff, 0xfe, 0xff,
+ 0x37, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa0, 0x30, 0xab, 0x29, 0x20, 0x00, 0x00,
+}
+
+func (m *Action) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Action) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.IsNonResourceURL {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x48
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x42
+ {
+ size, err := m.Content.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.ResourceName)
+ copy(dAtA[i:], m.ResourceName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Resource)
+ copy(dAtA[i:], m.Resource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Verb)
+ copy(dAtA[i:], m.Verb)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterRole) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AggregationRule != nil {
+ {
+ size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Subjects) > 0 {
+ for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.GroupNames != nil {
+ {
+ size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.UserNames != nil {
+ {
+ size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GroupRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GroupRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Groups) > 0 {
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Groups[iNdEx])
+ copy(dAtA[i:], m.Groups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IsPersonalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *LocalResourceAccessReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LocalResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Scopes != nil {
+ {
+ size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.GroupsSlice) > 0 {
+ for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.GroupsSlice[iNdEx])
+ copy(dAtA[i:], m.GroupsSlice[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.User)
+ copy(dAtA[i:], m.User)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRole) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedClusterRole) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedRole) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedRole) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedRoleBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m OptionalNames) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m[iNdEx])
+ copy(dAtA[i:], m[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m OptionalScopes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m OptionalScopes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalScopes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m[iNdEx])
+ copy(dAtA[i:], m[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PolicyRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NonResourceURLsSlice) > 0 {
+ for iNdEx := len(m.NonResourceURLsSlice) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.NonResourceURLsSlice[iNdEx])
+ copy(dAtA[i:], m.NonResourceURLsSlice[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLsSlice[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.ResourceNames) > 0 {
+ for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ResourceNames[iNdEx])
+ copy(dAtA[i:], m.ResourceNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Resources) > 0 {
+ for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Resources[iNdEx])
+ copy(dAtA[i:], m.Resources[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.APIGroups) > 0 {
+ for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.APIGroups[iNdEx])
+ copy(dAtA[i:], m.APIGroups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.AttributeRestrictions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Verbs) > 0 {
+ for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Verbs[iNdEx])
+ copy(dAtA[i:], m.Verbs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.EvaluationError)
+ copy(dAtA[i:], m.EvaluationError)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.GroupsSlice) > 0 {
+ for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.GroupsSlice[iNdEx])
+ copy(dAtA[i:], m.GroupsSlice[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.UsersSlice) > 0 {
+ for iNdEx := len(m.UsersSlice) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UsersSlice[iNdEx])
+ copy(dAtA[i:], m.UsersSlice[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UsersSlice[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleBinding) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Subjects) > 0 {
+ for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.GroupNames != nil {
+ {
+ size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.UserNames != nil {
+ {
+ size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ServiceAccountRestriction != nil {
+ {
+ size, err := m.ServiceAccountRestriction.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.GroupRestriction != nil {
+ {
+ size, err := m.GroupRestriction.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.UserRestriction != nil {
+ {
+ size, err := m.UserRestriction.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoleList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoleList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Scopes != nil {
+ {
+ size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceAccountReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceAccountRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Namespaces) > 0 {
+ for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Namespaces[iNdEx])
+ copy(dAtA[i:], m.Namespaces[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.ServiceAccounts) > 0 {
+ for iNdEx := len(m.ServiceAccounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Scopes != nil {
+ {
+ size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.GroupsSlice) > 0 {
+ for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.GroupsSlice[iNdEx])
+ copy(dAtA[i:], m.GroupsSlice[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.User)
+ copy(dAtA[i:], m.User)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.EvaluationError)
+ copy(dAtA[i:], m.EvaluationError)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x1a
+ i--
+ if m.Allowed {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Scopes != nil {
+ {
+ size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Groups) > 0 {
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Groups[iNdEx])
+ copy(dAtA[i:], m.Groups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.User)
+ copy(dAtA[i:], m.User)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.EvaluationError)
+ copy(dAtA[i:], m.EvaluationError)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UserRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Selectors) > 0 {
+ for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Groups) > 0 {
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Groups[iNdEx])
+ copy(dAtA[i:], m.Groups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Users) > 0 {
+ for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Users[iNdEx])
+ copy(dAtA[i:], m.Users[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Action) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Verb)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Resource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ResourceName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Content.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *ClusterRole) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.AggregationRule != nil {
+ l = m.AggregationRule.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ClusterRoleBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.UserNames != nil {
+ l = m.UserNames.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupNames != nil {
+ l = m.GroupNames.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Subjects) > 0 {
+ for _, e := range m.Subjects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RoleRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ClusterRoleBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterRoleList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *GroupRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IsPersonalSubjectAccessReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *LocalResourceAccessReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Action.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *LocalSubjectAccessReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Action.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.User)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.GroupsSlice) > 0 {
+ for _, s := range m.GroupsSlice {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Scopes != nil {
+ l = m.Scopes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NamedClusterRole) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Role.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NamedClusterRoleBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.RoleBinding.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NamedRole) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Role.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NamedRoleBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.RoleBinding.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m OptionalNames) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, s := range m {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m OptionalScopes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, s := range m {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PolicyRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Verbs) > 0 {
+ for _, s := range m.Verbs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.AttributeRestrictions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.APIGroups) > 0 {
+ for _, s := range m.APIGroups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Resources) > 0 {
+ for _, s := range m.Resources {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.NonResourceURLsSlice) > 0 {
+ for _, s := range m.NonResourceURLsSlice {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceAccessReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Action.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ResourceAccessReviewResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UsersSlice) > 0 {
+ for _, s := range m.UsersSlice {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.GroupsSlice) > 0 {
+ for _, s := range m.GroupsSlice {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.EvaluationError)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Role) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RoleBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.UserNames != nil {
+ l = m.UserNames.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupNames != nil {
+ l = m.GroupNames.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Subjects) > 0 {
+ for _, e := range m.Subjects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RoleRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RoleBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RoleBindingRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RoleBindingRestrictionList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RoleBindingRestrictionSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.UserRestriction != nil {
+ l = m.UserRestriction.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GroupRestriction != nil {
+ l = m.GroupRestriction.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ServiceAccountRestriction != nil {
+ l = m.ServiceAccountRestriction.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *RoleList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SelfSubjectRulesReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SelfSubjectRulesReviewSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Scopes != nil {
+ l = m.Scopes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ServiceAccountReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ServiceAccountRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ServiceAccounts) > 0 {
+ for _, e := range m.ServiceAccounts {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Namespaces) > 0 {
+ for _, s := range m.Namespaces {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SubjectAccessReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Action.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.User)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.GroupsSlice) > 0 {
+ for _, s := range m.GroupsSlice {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Scopes != nil {
+ l = m.Scopes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SubjectAccessReviewResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.EvaluationError)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SubjectRulesReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SubjectRulesReviewSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.User)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Scopes != nil {
+ l = m.Scopes.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SubjectRulesReviewStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.EvaluationError)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *UserRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Selectors) > 0 {
+ for _, e := range m.Selectors {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Action) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Action{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+ `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+ `Content:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Content), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `IsNonResourceURL:` + fmt.Sprintf("%v", this.IsNonResourceURL) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterRole) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]PolicyRule{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRules += "}"
+ s := strings.Join([]string{`&ClusterRole{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "v11.AggregationRule", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterRoleBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSubjects := "[]ObjectReference{"
+ for _, f := range this.Subjects {
+ repeatedStringForSubjects += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForSubjects += "}"
+ s := strings.Join([]string{`&ClusterRoleBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`,
+ `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`,
+ `Subjects:` + repeatedStringForSubjects + `,`,
+ `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterRoleBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterRoleBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterRoleBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterRoleList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterRole{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterRoleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GroupRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]LabelSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForSelectors += "}"
+ s := strings.Join([]string{`&GroupRestriction{`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IsPersonalSubjectAccessReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IsPersonalSubjectAccessReview{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LocalResourceAccessReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LocalResourceAccessReview{`,
+ `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LocalSubjectAccessReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LocalSubjectAccessReview{`,
+ `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+ `User:` + fmt.Sprintf("%v", this.User) + `,`,
+ `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`,
+ `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedClusterRole) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedClusterRole{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Role:` + strings.Replace(strings.Replace(this.Role.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedClusterRoleBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedClusterRoleBinding{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedRole) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedRole{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Role:` + strings.Replace(strings.Replace(this.Role.String(), "Role", "Role", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedRoleBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedRoleBinding{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PolicyRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PolicyRule{`,
+ `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`,
+ `AttributeRestrictions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AttributeRestrictions), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`,
+ `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
+ `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+ `NonResourceURLsSlice:` + fmt.Sprintf("%v", this.NonResourceURLsSlice) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceAccessReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceAccessReview{`,
+ `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceAccessReviewResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceAccessReviewResponse{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `UsersSlice:` + fmt.Sprintf("%v", this.UsersSlice) + `,`,
+ `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`,
+ `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Role) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]PolicyRule{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRules += "}"
+ s := strings.Join([]string{`&Role{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSubjects := "[]ObjectReference{"
+ for _, f := range this.Subjects {
+ repeatedStringForSubjects += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForSubjects += "}"
+ s := strings.Join([]string{`&RoleBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`,
+ `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`,
+ `Subjects:` + repeatedStringForSubjects + `,`,
+ `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]RoleBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&RoleBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleBindingRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RoleBindingRestriction{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RoleBindingRestrictionSpec", "RoleBindingRestrictionSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleBindingRestrictionList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]RoleBindingRestriction{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBindingRestriction", "RoleBindingRestriction", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&RoleBindingRestrictionList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleBindingRestrictionSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RoleBindingRestrictionSpec{`,
+ `UserRestriction:` + strings.Replace(this.UserRestriction.String(), "UserRestriction", "UserRestriction", 1) + `,`,
+ `GroupRestriction:` + strings.Replace(this.GroupRestriction.String(), "GroupRestriction", "GroupRestriction", 1) + `,`,
+ `ServiceAccountRestriction:` + strings.Replace(this.ServiceAccountRestriction.String(), "ServiceAccountRestriction", "ServiceAccountRestriction", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RoleList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Role{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&RoleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SelfSubjectRulesReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SelfSubjectRulesReview{`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SelfSubjectRulesReviewSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`,
+ `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceAccountReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceAccountReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceAccountRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForServiceAccounts := "[]ServiceAccountReference{"
+ for _, f := range this.ServiceAccounts {
+ repeatedStringForServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountReference", "ServiceAccountReference", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForServiceAccounts += "}"
+ s := strings.Join([]string{`&ServiceAccountRestriction{`,
+ `ServiceAccounts:` + repeatedStringForServiceAccounts + `,`,
+ `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SubjectAccessReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SubjectAccessReview{`,
+ `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+ `User:` + fmt.Sprintf("%v", this.User) + `,`,
+ `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`,
+ `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SubjectAccessReviewResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SubjectAccessReviewResponse{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SubjectRulesReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SubjectRulesReview{`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectRulesReviewSpec", "SubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SubjectRulesReviewSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SubjectRulesReviewSpec{`,
+ `User:` + fmt.Sprintf("%v", this.User) + `,`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SubjectRulesReviewStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]PolicyRule{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRules += "}"
+ s := strings.Join([]string{`&SubjectRulesReviewStatus{`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UserRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSelectors := "[]LabelSelector{"
+ for _, f := range this.Selectors {
+ repeatedStringForSelectors += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForSelectors += "}"
+ s := strings.Join([]string{`&UserRestriction{`,
+ `Users:` + fmt.Sprintf("%v", this.Users) + `,`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `Selectors:` + repeatedStringForSelectors + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Action) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Action: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Verb = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Content.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsNonResourceURL", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsNonResourceURL = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRole) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, PolicyRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AggregationRule == nil {
+ m.AggregationRule = &v11.AggregationRule{}
+ }
+ if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UserNames == nil {
+ m.UserNames = OptionalNames{}
+ }
+ if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GroupNames == nil {
+ m.GroupNames = OptionalNames{}
+ }
+ if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subjects = append(m.Subjects, v12.ObjectReference{})
+ if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterRoleBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterRoleList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterRole{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selectors = append(m.Selectors, v1.LabelSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IsPersonalSubjectAccessReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IsPersonalSubjectAccessReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IsPersonalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LocalResourceAccessReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LocalResourceAccessReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LocalResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scopes == nil {
+ m.Scopes = OptionalScopes{}
+ }
+ if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedClusterRole) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedClusterRole: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedClusterRole: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedClusterRoleBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedClusterRoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedRole) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedRole: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedRole: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedRoleBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedRoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OptionalNames) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ *m = append(*m, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OptionalScopes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OptionalScopes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OptionalScopes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ *m = append(*m, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PolicyRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.AttributeRestrictions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLsSlice", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NonResourceURLsSlice = append(m.NonResourceURLsSlice, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceAccessReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceAccessReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceAccessReviewResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceAccessReviewResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsersSlice", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UsersSlice = append(m.UsersSlice, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EvaluationError = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Role: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, PolicyRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UserNames == nil {
+ m.UserNames = OptionalNames{}
+ }
+ if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GroupNames == nil {
+ m.GroupNames = OptionalNames{}
+ }
+ if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subjects = append(m.Subjects, v12.ObjectReference{})
+ if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, RoleBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBindingRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBindingRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBindingRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBindingRestrictionList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBindingRestrictionList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBindingRestrictionList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, RoleBindingRestriction{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleBindingRestrictionSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleBindingRestrictionSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleBindingRestrictionSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserRestriction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UserRestriction == nil {
+ m.UserRestriction = &UserRestriction{}
+ }
+ if err := m.UserRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupRestriction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.GroupRestriction == nil {
+ m.GroupRestriction = &GroupRestriction{}
+ }
+ if err := m.GroupRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountRestriction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ServiceAccountRestriction == nil {
+ m.ServiceAccountRestriction = &ServiceAccountRestriction{}
+ }
+ if err := m.ServiceAccountRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RoleList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RoleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Role{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scopes == nil {
+ m.Scopes = OptionalScopes{}
+ }
+ if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccountReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceAccountRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceAccountRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceAccountRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccounts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccounts = append(m.ServiceAccounts, ServiceAccountReference{})
+ if err := m.ServiceAccounts[len(m.ServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scopes == nil {
+ m.Scopes = OptionalScopes{}
+ }
+ if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubjectAccessReviewResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubjectAccessReviewResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubjectAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Allowed = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EvaluationError = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubjectRulesReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubjectRulesReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubjectRulesReviewSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubjectRulesReviewSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Scopes == nil {
+ m.Scopes = OptionalScopes{}
+ }
+ if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, PolicyRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EvaluationError = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UserRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Selectors = append(m.Selectors, v1.LabelSelector{})
+ if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto
new file mode 100644
index 0000000000..4be3f6c762
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto
@@ -0,0 +1,561 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.authorization.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/api/rbac/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/authorization/v1";
+
+// Action describes a request to the API server
+message Action {
+ // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ optional string namespace = 1;
+
+ // Verb is one of: get, list, watch, create, update, delete
+ optional string verb = 2;
+
+ // Group is the API group of the resource
+ // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined
+ optional string resourceAPIGroup = 3;
+
+ // Version is the API version of the resource
+ // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
+ optional string resourceAPIVersion = 4;
+
+ // Resource is one of the existing resource types
+ optional string resource = 5;
+
+ // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
+ optional string resourceName = 6;
+
+ // Path is the path of a non resource URL
+ optional string path = 8;
+
+ // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
+ optional bool isNonResourceURL = 9;
+
+ // Content is the actual content of the request for create and update
+ // +kubebuilder:pruning:PreserveUnknownFields
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension content = 7;
+}
+
+// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterRole {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ repeated PolicyRule rules = 2;
+
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ optional k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace.
+// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
+// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterRoleBinding {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // UserNames holds all the usernames directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ optional OptionalNames userNames = 2;
+
+ // GroupNames holds all the groups directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ optional OptionalNames groupNames = 3;
+
+ // Subjects hold object references to authorize with this rule.
+ // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
+ // Thus newer clients that do not need to support backwards compatibility should send
+ // only fully qualified Subjects and should omit the UserNames and GroupNames fields.
+ // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
+ repeated k8s.io.api.core.v1.ObjectReference subjects = 4;
+
+ // RoleRef can only reference the current namespace and the global namespace.
+ // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
+ // Since Policy is a singleton, this is sufficient knowledge to locate a role.
+ optional k8s.io.api.core.v1.ObjectReference roleRef = 5;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterRoleBindingList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of ClusterRoleBindings
+ repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterRoleList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of ClusterRoles
+ repeated ClusterRole items = 2;
+}
+
+// GroupRestriction matches a group either by a string match on the group name
+// or a label selector applied to group labels.
+message GroupRestriction {
+ // Groups is a list of groups used to match against an individual user's
+ // groups. If the user is a member of one of the whitelisted groups, the user
+ // is allowed to be bound to a role.
+ // +nullable
+ repeated string groups = 1;
+
+ // Selectors specifies a list of label selectors over group labels.
+ // +nullable
+ repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 2;
+}
+
+// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message IsPersonalSubjectAccessReview {
+}
+
+// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message LocalResourceAccessReview {
+ // Action describes the action being tested. The Namespace element is FORCED to the current namespace.
+ optional Action Action = 1;
+}
+
+// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message LocalSubjectAccessReview {
+ // Action describes the action being tested. The Namespace element is FORCED to the current namespace.
+ optional Action Action = 1;
+
+ // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ optional string user = 2;
+
+ // Groups is optional. Groups is the list of groups to which the User belongs.
+ // +k8s:conversion-gen=false
+ repeated string groups = 3;
+
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil for a self-SAR, means "use the scopes on this request".
+ // Nil for a regular SAR, means the same as empty.
+ // +k8s:conversion-gen=false
+ optional OptionalScopes scopes = 4;
+}
+
+// NamedClusterRole relates a name with a cluster role
+message NamedClusterRole {
+ // Name is the name of the cluster role
+ optional string name = 1;
+
+ // Role is the cluster role being named
+ optional ClusterRole role = 2;
+}
+
+// NamedClusterRoleBinding relates a name with a cluster role binding
+message NamedClusterRoleBinding {
+ // Name is the name of the cluster role binding
+ optional string name = 1;
+
+ // RoleBinding is the cluster role binding being named
+ optional ClusterRoleBinding roleBinding = 2;
+}
+
+// NamedRole relates a Role with a name
+message NamedRole {
+ // Name is the name of the role
+ optional string name = 1;
+
+ // Role is the role being named
+ optional Role role = 2;
+}
+
+// NamedRoleBinding relates a role binding with a name
+message NamedRoleBinding {
+ // Name is the name of the role binding
+ optional string name = 1;
+
+ // RoleBinding is the role binding being named
+ optional RoleBinding roleBinding = 2;
+}
+
+// OptionalNames is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalNames {
+ // items, if empty, will result in an empty slice
+
+ repeated string items = 1;
+}
+
+// OptionalScopes is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalScopes {
+ // items, if empty, will result in an empty slice
+
+ repeated string items = 1;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ repeated string verbs = 1;
+
+ // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+ // +kubebuilder:pruning:PreserveUnknownFields
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2;
+
+ // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
+ // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
+ // will be allowed
+ // +optional
+ // +nullable
+ repeated string apiGroups = 3;
+
+ // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ repeated string resources = 4;
+
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ repeated string resourceNames = 5;
+
+ // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+ repeated string nonResourceURLs = 6;
+}
+
+// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the
+// action specified by spec
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ResourceAccessReview {
+ // Action describes the action being tested.
+ optional Action Action = 1;
+}
+
+// ResourceAccessReviewResponse describes who can perform the action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ResourceAccessReviewResponse {
+ // Namespace is the namespace used for the access review
+ optional string namespace = 1;
+
+ // UsersSlice is the list of users who can perform the action
+ // +k8s:conversion-gen=false
+ repeated string users = 2;
+
+ // GroupsSlice is the list of groups who can perform the action
+ // +k8s:conversion-gen=false
+ repeated string groups = 3;
+
+ // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned.
+ // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is
+ // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
+ // NOTE(review): the serialized name "evalutionError" (sic) looks like a long-standing typo that is preserved
+ // for wire/JSON compatibility — do not rename it without a compatibility review.
+ optional string evalutionError = 4;
+}
+
+// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Role {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Rules holds all the PolicyRules for this Role
+ repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace.
+// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
+// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message RoleBinding {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // UserNames holds all the usernames directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ optional OptionalNames userNames = 2;
+
+ // GroupNames holds all the groups directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ optional OptionalNames groupNames = 3;
+
+ // Subjects hold object references to authorize with this rule.
+ // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
+ // Thus newer clients that do not need to support backwards compatibility should send
+ // only fully qualified Subjects and should omit the UserNames and GroupNames fields.
+ // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
+ repeated k8s.io.api.core.v1.ObjectReference subjects = 4;
+
+ // RoleRef can only reference the current namespace and the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // Since Policy is a singleton, this is sufficient knowledge to locate a role.
+ optional k8s.io.api.core.v1.ObjectReference roleRef = 5;
+}
+
+// RoleBindingList is a collection of RoleBindings
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message RoleBindingList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of RoleBindings
+ repeated RoleBinding items = 2;
+}
+
+// RoleBindingRestriction is an object that can be matched against a subject
+// (user, group, or service account) to determine whether rolebindings on that
+// subject are allowed in the namespace to which the RoleBindingRestriction
+// belongs. If any one of those RoleBindingRestriction objects matches
+// a subject, rolebindings on that subject in the namespace are allowed.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rolebindingrestrictions,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01
+// +openshift:compatibility-gen:level=1
+message RoleBindingRestriction {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the matcher.
+ optional RoleBindingRestrictionSpec spec = 2;
+}
+
+// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message RoleBindingRestrictionList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of RoleBindingRestriction objects.
+ repeated RoleBindingRestriction items = 2;
+}
+
+// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one
+// field must be non-nil.
+message RoleBindingRestrictionSpec {
+ // UserRestriction matches against user subjects.
+ // +nullable
+ optional UserRestriction userrestriction = 1;
+
+ // GroupRestriction matches against group subjects.
+ // +nullable
+ optional GroupRestriction grouprestriction = 2;
+
+ // ServiceAccountRestriction matches against service-account subjects.
+ // +nullable
+ optional ServiceAccountRestriction serviceaccountrestriction = 3;
+}
+
+// RoleList is a collection of Roles
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message RoleList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of Roles
+ repeated Role items = 2;
+}
+
+// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message SelfSubjectRulesReview {
+ // Spec adds information about how to conduct the check
+ optional SelfSubjectRulesReviewSpec spec = 1;
+
+ // Status is completed by the server to tell which permissions you have
+ optional SubjectRulesReviewStatus status = 2;
+}
+
+// SelfSubjectRulesReviewSpec adds information about how to conduct the check
+message SelfSubjectRulesReviewSpec {
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil means "use the scopes on this request".
+ // +k8s:conversion-gen=false
+ optional OptionalScopes scopes = 1;
+}
+
+// ServiceAccountReference specifies a service account and namespace by their
+// names.
+message ServiceAccountReference {
+ // Name is the name of the service account.
+ optional string name = 1;
+
+ // Namespace is the namespace of the service account. Service accounts from
+ // inside the whitelisted namespaces are allowed to be bound to roles. If
+ // Namespace is empty, then the namespace of the RoleBindingRestriction in
+ // which the ServiceAccountReference is embedded is used.
+ optional string namespace = 2;
+}
+
+// ServiceAccountRestriction matches a service account by a string match on
+// either the service-account name or the name of the service account's
+// namespace.
+message ServiceAccountRestriction {
+ // ServiceAccounts specifies a list of literal service-account names.
+ repeated ServiceAccountReference serviceaccounts = 1;
+
+ // Namespaces specifies a list of literal namespace names.
+ repeated string namespaces = 2;
+}
+
+// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message SubjectAccessReview {
+ // Action describes the action being tested.
+ optional Action Action = 1;
+
+ // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ optional string user = 2;
+
+ // GroupsSlice is optional. Groups is the list of groups to which the User belongs.
+ // +k8s:conversion-gen=false
+ repeated string groups = 3;
+
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil for a self-SAR, means "use the scopes on this request".
+ // Nil for a regular SAR, means the same as empty.
+ // +k8s:conversion-gen=false
+ optional OptionalScopes scopes = 4;
+}
+
+// SubjectAccessReviewResponse describes whether or not a user or group can perform an action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message SubjectAccessReviewResponse {
+ // Namespace is the namespace used for the access review
+ optional string namespace = 1;
+
+ // Allowed is required. True if the action would be allowed, false otherwise.
+ optional bool allowed = 2;
+
+ // Reason is optional. It indicates why a request was allowed or denied.
+ optional string reason = 3;
+
+ // EvaluationError is an indication that some error occurred during the authorization check.
+ // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is
+ // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
+ optional string evaluationError = 4;
+}
+
+// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message SubjectRulesReview {
+ // Spec adds information about how to conduct the check
+ optional SubjectRulesReviewSpec spec = 1;
+
+ // Status is completed by the server to tell which permissions you have
+ optional SubjectRulesReviewStatus status = 2;
+}
+
+// SubjectRulesReviewSpec adds information about how to conduct the check
+message SubjectRulesReviewSpec {
+ // User is optional. At least one of User and Groups must be specified.
+ optional string user = 1;
+
+ // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
+ repeated string groups = 2;
+
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ optional OptionalScopes scopes = 3;
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check
+message SubjectRulesReviewStatus {
+ // Rules is the list of rules (no particular sort) that are allowed for the subject
+ repeated PolicyRule rules = 1;
+
+ // EvaluationError can appear in combination with Rules. It means some error happened during evaluation
+ // that may have prevented additional rules from being populated.
+ optional string evaluationError = 2;
+}
+
+// UserRestriction matches a user either by a string match on the user name,
+// a string match on the name of a group to which the user belongs, or a label
+// selector applied to the user labels.
+message UserRestriction {
+ // Users specifies a list of literal user names.
+ repeated string users = 1;
+
+ // Groups specifies a list of literal group names.
+ // +nullable
+ repeated string groups = 2;
+
+ // Selectors specifies a list of label selectors over user labels.
+ // +nullable
+ repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 3;
+}
+
diff --git a/vendor/github.com/openshift/api/authorization/v1/legacy.go b/vendor/github.com/openshift/api/authorization/v1/legacy.go
new file mode 100644
index 0000000000..f437a242ea
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/legacy.go
@@ -0,0 +1,43 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Registration helpers for the legacy, group-less ("v1") API version.
+var (
+ // legacyGroupVersion is the pre-API-groups GroupVersion (empty group, version "v1").
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ // legacySchemeBuilder also pulls in the core and rbac types that these APIs reference.
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme)
+ // DeprecatedInstallWithoutGroup registers these types without an API group.
+ // Deprecated: prefer Install (register.go), which uses authorization.openshift.io/v1.
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+// addLegacyKnownTypes registers every authorization API type with the scheme
+// under the legacy, group-less v1 GroupVersion.
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &Role{},
+ &RoleBinding{},
+ &RoleBindingList{},
+ &RoleList{},
+
+ // Review/introspection resources.
+ &SelfSubjectRulesReview{},
+ &SubjectRulesReview{},
+ &ResourceAccessReview{},
+ &SubjectAccessReview{},
+ &LocalResourceAccessReview{},
+ &LocalSubjectAccessReview{},
+ &ResourceAccessReviewResponse{},
+ &SubjectAccessReviewResponse{},
+ &IsPersonalSubjectAccessReview{},
+
+ &ClusterRole{},
+ &ClusterRoleBinding{},
+ &ClusterRoleBindingList{},
+ &ClusterRoleList{},
+
+ &RoleBindingRestriction{},
+ &RoleBindingRestrictionList{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/register.go b/vendor/github.com/openshift/api/authorization/v1/register.go
new file mode 100644
index 0000000000..f1e12477b6
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/register.go
@@ -0,0 +1,60 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ // GroupName is the API group served by this package.
+ GroupName = "authorization.openshift.io"
+ // GroupVersion is the group/version of the types in this package.
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ // schemeBuilder also installs the core and rbac types that these APIs reference.
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+// Resource qualifies an unqualified resource name with this package's API group.
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// addKnownTypes registers all authorization.openshift.io/v1 kinds, plus the
+// shared meta types for the group/version, with the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Role{},
+ &RoleBinding{},
+ &RoleBindingList{},
+ &RoleList{},
+
+ // Review/introspection resources.
+ &SelfSubjectRulesReview{},
+ &SubjectRulesReview{},
+ &ResourceAccessReview{},
+ &SubjectAccessReview{},
+ &LocalResourceAccessReview{},
+ &LocalSubjectAccessReview{},
+ &ResourceAccessReviewResponse{},
+ &SubjectAccessReviewResponse{},
+ &IsPersonalSubjectAccessReview{},
+
+ &ClusterRole{},
+ &ClusterRoleBinding{},
+ &ClusterRoleBindingList{},
+ &ClusterRoleList{},
+
+ &RoleBindingRestriction{},
+ &RoleBindingRestrictionList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go
new file mode 100644
index 0000000000..11a71e65dd
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/types.go
@@ -0,0 +1,636 @@
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kruntime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Authorization is calculated against
+// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match
+// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match
+// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match
+// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match
+// 5. deny by default
+
+const (
+ // GroupKind is string representation of kind used in role binding subjects that represents the "group".
+ GroupKind = "Group"
+ // UserKind is string representation of kind used in role binding subjects that represents the "user".
+ UserKind = "User"
+
+ ScopesKey = "scopes.authorization.openshift.io"
+)
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
+ // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+ // +kubebuilder:pruning:PreserveUnknownFields
+ AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"`
+ // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
+ // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
+ // will be allowed
+ // +optional
+ // +nullable
+ APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"`
+ // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
+ // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+ NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IsPersonalSubjectAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Role struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+}
+
+// OptionalNames is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalNames []string
+
+func (t OptionalNames) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace.
+// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
+// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RoleBinding struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // UserNames holds all the usernames directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
+ // GroupNames holds all the groups directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
+ // Subjects hold object references to authorize with this rule.
+ // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
+ // Thus newer clients that do not need to support backwards compatibility should send
+ // only fully qualified Subjects and should omit the UserNames and GroupNames fields.
+ // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
+ Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`
+
+ // RoleRef can only reference the current namespace and the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // Since Policy is a singleton, this is sufficient knowledge to locate a role.
+ RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
+}
+
+// NamedRole relates a Role with a name
+type NamedRole struct {
+ // Name is the name of the role
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Role is the role being named
+ Role Role `json:"role" protobuf:"bytes,2,opt,name=role"`
+}
+
+// NamedRoleBinding relates a role binding with a name
+type NamedRoleBinding struct {
+ // Name is the name of the role binding
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // RoleBinding is the role binding being named
+ RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SelfSubjectRulesReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec adds information about how to conduct the check
+ Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
+
+ // Status is completed by the server to tell which permissions you have
+ Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// SelfSubjectRulesReviewSpec adds information about how to conduct the check
+type SelfSubjectRulesReviewSpec struct {
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil means "use the scopes on this request".
+ // +k8s:conversion-gen=false
+ Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SubjectRulesReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec adds information about how to conduct the check
+ Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
+
+ // Status is completed by the server to tell which permissions you have
+ Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// SubjectRulesReviewSpec adds information about how to conduct the check
+type SubjectRulesReviewSpec struct {
+ // User is optional. At least one of User and Groups must be specified.
+ User string `json:"user" protobuf:"bytes,1,opt,name=user"`
+ // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.
+ Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"`
+}
+
+// SubjectRulesReviewStatus is contains the result of a rules check
+type SubjectRulesReviewStatus struct {
+ // Rules is the list of rules (no particular sort) that are allowed for the subject
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"`
+ // EvaluationError can appear in combination with Rules. It means some error happened during evaluation
+ // that may have prevented additional rules from being populated.
+ EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceAccessReviewResponse describes who can perform the action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ResourceAccessReviewResponse struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Namespace is the namespace used for the access review
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // UsersSlice is the list of users who can perform the action
+ // +k8s:conversion-gen=false
+ UsersSlice []string `json:"users" protobuf:"bytes,2,rep,name=users"`
+ // GroupsSlice is the list of groups who can perform the action
+ // +k8s:conversion-gen=false
+ GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
+
+ // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned.
+ // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is
+ // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
+ EvaluationError string `json:"evalutionError" protobuf:"bytes,4,opt,name=evalutionError"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the
+// action specified by spec
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ResourceAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReviewResponse describes whether or not a user or group can perform an action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SubjectAccessReviewResponse struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Namespace is the namespace used for the access review
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // Allowed is required. True if the action would be allowed, false otherwise.
+ Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+ // Reason is optional. It indicates why a request was allowed or denied.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+ // EvaluationError is an indication that some error occurred during the authorization check.
+ // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is
+ // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
+ EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"`
+}
+
+// OptionalScopes is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalScopes []string
+
+func (t OptionalScopes) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SubjectAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+ // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ User string `json:"user" protobuf:"bytes,2,opt,name=user"`
+ // GroupsSlice is optional. Groups is the list of groups to which the User belongs.
+ // +k8s:conversion-gen=false
+ GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil for a self-SAR, means "use the scopes on this request".
+ // Nil for a regular SAR, means the same as empty.
+ // +k8s:conversion-gen=false
+ Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"`
+}
+
+// +genclient
+// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type LocalResourceAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested. The Namespace element is FORCED to the current namespace.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+}
+
+// +genclient
+// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type LocalSubjectAccessReview struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Action describes the action being tested. The Namespace element is FORCED to the current namespace.
+ Action `json:",inline" protobuf:"bytes,1,opt,name=Action"`
+ // User is optional. If both User and Groups are empty, the current authenticated user is used.
+ User string `json:"user" protobuf:"bytes,2,opt,name=user"`
+ // Groups is optional. Groups is the list of groups to which the User belongs.
+ // +k8s:conversion-gen=false
+ GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"`
+ // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
+ // Nil for a self-SAR, means "use the scopes on this request".
+ // Nil for a regular SAR, means the same as empty.
+ // +k8s:conversion-gen=false
+ Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"`
+}
+
+// Action describes a request to the API server
+type Action struct {
+ // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+ // Verb is one of: get, list, watch, create, update, delete
+ Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"`
+ // Group is the API group of the resource
+ // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined
+ Group string `json:"resourceAPIGroup" protobuf:"bytes,3,opt,name=resourceAPIGroup"`
+ // Version is the API version of the resource
+ // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined
+ Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"`
+ // Resource is one of the existing resource types
+ Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"`
+ // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
+ ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"`
+ // Path is the path of a non resource URL
+ Path string `json:"path" protobuf:"bytes,8,opt,name=path"`
+ // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)
+ IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"`
+ // Content is the actual content of the request for create and update
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingList is a collection of RoleBindings
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RoleBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of RoleBindings
+ Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleList is a collection of Roles
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RoleList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of Roles
+ Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterRole struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace.
+// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in.
+// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterRoleBinding struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // UserNames holds all the usernames directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"`
+ // GroupNames holds all the groups directly bound to the role.
+ // This field should only be specified when supporting legacy clients and servers.
+ // See Subjects for further details.
+ // +k8s:conversion-gen=false
+ // +optional
+ GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"`
+ // Subjects hold object references to authorize with this rule.
+ // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
+ // Thus newer clients that do not need to support backwards compatibility should send
+ // only fully qualified Subjects and should omit the UserNames and GroupNames fields.
+ // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.
+ Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"`
+
+ // RoleRef can only reference the current namespace and the global namespace.
+ // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
+ // Since Policy is a singleton, this is sufficient knowledge to locate a role.
+ RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"`
+}
+
+// NamedClusterRole relates a name with a cluster role
+type NamedClusterRole struct {
+ // Name is the name of the cluster role
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Role is the cluster role being named
+ Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"`
+}
+
+// NamedClusterRoleBinding relates a name with a cluster role binding
+type NamedClusterRoleBinding struct {
+ // Name is the name of the cluster role binding
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // RoleBinding is the cluster role binding being named
+ RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterRoleBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ClusterRoleBindings
+ Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterRoleList is a collection of ClusterRoles
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterRoleList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ClusterRoles
+ Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingRestriction is an object that can be matched against a subject
+// (user, group, or service account) to determine whether rolebindings on that
+// subject are allowed in the namespace to which the RoleBindingRestriction
+// belongs. If any one of those RoleBindingRestriction objects matches
+// a subject, rolebindings on that subject in the namespace are allowed.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rolebindingrestrictions,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01
+// +openshift:compatibility-gen:level=1
+type RoleBindingRestriction struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the matcher.
+ Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one
+// field must be non-nil.
+type RoleBindingRestrictionSpec struct {
+ // UserRestriction matches against user subjects.
+ // +nullable
+ UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"`
+
+ // GroupRestriction matches against group subjects.
+ // +nullable
+ GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"`
+
+ // ServiceAccountRestriction matches against service-account subjects.
+ // +nullable
+ ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RoleBindingRestrictionList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of RoleBindingRestriction objects.
+ Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// UserRestriction matches a user either by a string match on the user name,
+// a string match on the name of a group to which the user belongs, or a label
+// selector applied to the user labels.
+type UserRestriction struct {
+ // Users specifies a list of literal user names.
+ Users []string `json:"users" protobuf:"bytes,1,rep,name=users"`
+
+ // Groups specifies a list of literal group names.
+ // +nullable
+ Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"`
+
+ // Selectors specifies a list of label selectors over user labels.
+ // +nullable
+ Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"`
+}
+
+// GroupRestriction matches a group either by a string match on the group name
+// or a label selector applied to group labels.
+type GroupRestriction struct {
+ // Groups is a list of groups used to match against an individual user's
+ // groups. If the user is a member of one of the whitelisted groups, the user
+ // is allowed to be bound to a role.
+ // +nullable
+ Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"`
+
+ // Selectors specifies a list of label selectors over group labels.
+ // +nullable
+ Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"`
+}
+
+// ServiceAccountRestriction matches a service account by a string match on
+// either the service-account name or the name of the service account's
+// namespace.
+type ServiceAccountRestriction struct {
+ // ServiceAccounts specifies a list of literal service-account names.
+ ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"`
+
+ // Namespaces specifies a list of literal namespace names.
+ Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
+}
+
+// ServiceAccountReference specifies a service account and namespace by their
+// names.
+type ServiceAccountReference struct {
+ // Name is the name of the service account.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Namespace is the namespace of the service account. Service accounts from
+ // inside the whitelisted namespaces are allowed to be bound to roles. If
+ // Namespace is empty, then the namespace of the RoleBindingRestriction in
+ // which the ServiceAccountReference is embedded is used.
+ Namespace string `json:"namespace" protobuf:"bytes,2,opt,name=namespace"`
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..1214fc02bf
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go
@@ -0,0 +1,994 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Action) DeepCopyInto(out *Action) {
+ *out = *in
+ in.Content.DeepCopyInto(&out.Content)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action.
+func (in *Action) DeepCopy() *Action {
+ if in == nil {
+ return nil
+ }
+ out := new(Action)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRole) DeepCopyInto(out *ClusterRole) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AggregationRule != nil {
+ in, out := &in.AggregationRule, &out.AggregationRule
+ *out = new(rbacv1.AggregationRule)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole.
+func (in *ClusterRole) DeepCopy() *ClusterRole {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterRole)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRole) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserNames != nil {
+ in, out := &in.UserNames, &out.UserNames
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupNames != nil {
+ in, out := &in.GroupNames, &out.GroupNames
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]corev1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ out.RoleRef = in.RoleRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding.
+func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterRoleBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterRoleBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList.
+func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterRoleBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterRole, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList.
+func (in *ClusterRoleList) DeepCopy() *ClusterRoleList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterRoleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterRoleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupRestriction) DeepCopyInto(out *GroupRestriction) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
+ *out = make([]metav1.LabelSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupRestriction.
+func (in *GroupRestriction) DeepCopy() *GroupRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IsPersonalSubjectAccessReview) DeepCopyInto(out *IsPersonalSubjectAccessReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsPersonalSubjectAccessReview.
+func (in *IsPersonalSubjectAccessReview) DeepCopy() *IsPersonalSubjectAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(IsPersonalSubjectAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IsPersonalSubjectAccessReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalResourceAccessReview) DeepCopyInto(out *LocalResourceAccessReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Action.DeepCopyInto(&out.Action)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResourceAccessReview.
+func (in *LocalResourceAccessReview) DeepCopy() *LocalResourceAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(LocalResourceAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LocalResourceAccessReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Action.DeepCopyInto(&out.Action)
+ if in.GroupsSlice != nil {
+ in, out := &in.GroupsSlice, &out.GroupsSlice
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make(OptionalScopes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview.
+func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(LocalSubjectAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedClusterRole) DeepCopyInto(out *NamedClusterRole) {
+ *out = *in
+ in.Role.DeepCopyInto(&out.Role)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRole.
+func (in *NamedClusterRole) DeepCopy() *NamedClusterRole {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedClusterRole)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedClusterRoleBinding) DeepCopyInto(out *NamedClusterRoleBinding) {
+ *out = *in
+ in.RoleBinding.DeepCopyInto(&out.RoleBinding)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRoleBinding.
+func (in *NamedClusterRoleBinding) DeepCopy() *NamedClusterRoleBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedClusterRoleBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedRole) DeepCopyInto(out *NamedRole) {
+ *out = *in
+ in.Role.DeepCopyInto(&out.Role)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRole.
+func (in *NamedRole) DeepCopy() *NamedRole {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedRole)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedRoleBinding) DeepCopyInto(out *NamedRoleBinding) {
+ *out = *in
+ in.RoleBinding.DeepCopyInto(&out.RoleBinding)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRoleBinding.
+func (in *NamedRoleBinding) DeepCopy() *NamedRoleBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedRoleBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in OptionalNames) DeepCopyInto(out *OptionalNames) {
+ {
+ in := &in
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames.
+func (in OptionalNames) DeepCopy() OptionalNames {
+ if in == nil {
+ return nil
+ }
+ out := new(OptionalNames)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in OptionalScopes) DeepCopyInto(out *OptionalScopes) {
+ {
+ in := &in
+ *out = make(OptionalScopes, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalScopes.
+func (in OptionalScopes) DeepCopy() OptionalScopes {
+ if in == nil {
+ return nil
+ }
+ out := new(OptionalScopes)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+ *out = *in
+ if in.Verbs != nil {
+ in, out := &in.Verbs, &out.Verbs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.AttributeRestrictions.DeepCopyInto(&out.AttributeRestrictions)
+ if in.APIGroups != nil {
+ in, out := &in.APIGroups, &out.APIGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ResourceNames != nil {
+ in, out := &in.ResourceNames, &out.ResourceNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NonResourceURLsSlice != nil {
+ in, out := &in.NonResourceURLsSlice, &out.NonResourceURLsSlice
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceAccessReview) DeepCopyInto(out *ResourceAccessReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Action.DeepCopyInto(&out.Action)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReview.
+func (in *ResourceAccessReview) DeepCopy() *ResourceAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceAccessReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceAccessReviewResponse) DeepCopyInto(out *ResourceAccessReviewResponse) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.UsersSlice != nil {
+ in, out := &in.UsersSlice, &out.UsersSlice
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupsSlice != nil {
+ in, out := &in.GroupsSlice, &out.GroupsSlice
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReviewResponse.
+func (in *ResourceAccessReviewResponse) DeepCopy() *ResourceAccessReviewResponse {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceAccessReviewResponse)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceAccessReviewResponse) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Role) DeepCopyInto(out *Role) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role.
+func (in *Role) DeepCopy() *Role {
+ if in == nil {
+ return nil
+ }
+ out := new(Role)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Role) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserNames != nil {
+ in, out := &in.UserNames, &out.UserNames
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupNames != nil {
+ in, out := &in.GroupNames, &out.GroupNames
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ }
+ if in.Subjects != nil {
+ in, out := &in.Subjects, &out.Subjects
+ *out = make([]corev1.ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ out.RoleRef = in.RoleRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding.
+func (in *RoleBinding) DeepCopy() *RoleBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RoleBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList.
+func (in *RoleBindingList) DeepCopy() *RoleBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingRestriction) DeepCopyInto(out *RoleBindingRestriction) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestriction.
+func (in *RoleBindingRestriction) DeepCopy() *RoleBindingRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingRestriction) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingRestrictionList) DeepCopyInto(out *RoleBindingRestrictionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RoleBindingRestriction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionList.
+func (in *RoleBindingRestrictionList) DeepCopy() *RoleBindingRestrictionList {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingRestrictionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleBindingRestrictionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleBindingRestrictionSpec) DeepCopyInto(out *RoleBindingRestrictionSpec) {
+ *out = *in
+ if in.UserRestriction != nil {
+ in, out := &in.UserRestriction, &out.UserRestriction
+ *out = new(UserRestriction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GroupRestriction != nil {
+ in, out := &in.GroupRestriction, &out.GroupRestriction
+ *out = new(GroupRestriction)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceAccountRestriction != nil {
+ in, out := &in.ServiceAccountRestriction, &out.ServiceAccountRestriction
+ *out = new(ServiceAccountRestriction)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionSpec.
+func (in *RoleBindingRestrictionSpec) DeepCopy() *RoleBindingRestrictionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleBindingRestrictionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoleList) DeepCopyInto(out *RoleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Role, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList.
+func (in *RoleList) DeepCopy() *RoleList {
+ if in == nil {
+ return nil
+ }
+ out := new(RoleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview.
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview {
+ if in == nil {
+ return nil
+ }
+ out := new(SelfSubjectRulesReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) {
+ *out = *in
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make(OptionalScopes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec.
+func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SelfSubjectRulesReviewSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountReference) DeepCopyInto(out *ServiceAccountReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountReference.
+func (in *ServiceAccountReference) DeepCopy() *ServiceAccountReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountRestriction) DeepCopyInto(out *ServiceAccountRestriction) {
+ *out = *in
+ if in.ServiceAccounts != nil {
+ in, out := &in.ServiceAccounts, &out.ServiceAccounts
+ *out = make([]ServiceAccountReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Namespaces != nil {
+ in, out := &in.Namespaces, &out.Namespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRestriction.
+func (in *ServiceAccountRestriction) DeepCopy() *ServiceAccountRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Action.DeepCopyInto(&out.Action)
+ if in.GroupsSlice != nil {
+ in, out := &in.GroupsSlice, &out.GroupsSlice
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make(OptionalScopes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview.
+func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(SubjectAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubjectAccessReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectAccessReviewResponse) DeepCopyInto(out *SubjectAccessReviewResponse) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewResponse.
+func (in *SubjectAccessReviewResponse) DeepCopy() *SubjectAccessReviewResponse {
+ if in == nil {
+ return nil
+ }
+ out := new(SubjectAccessReviewResponse)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubjectAccessReviewResponse) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectRulesReview) DeepCopyInto(out *SubjectRulesReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReview.
+func (in *SubjectRulesReview) DeepCopy() *SubjectRulesReview {
+ if in == nil {
+ return nil
+ }
+ out := new(SubjectRulesReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubjectRulesReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectRulesReviewSpec) DeepCopyInto(out *SubjectRulesReviewSpec) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make(OptionalScopes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewSpec.
+func (in *SubjectRulesReviewSpec) DeepCopy() *SubjectRulesReviewSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SubjectRulesReviewSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) {
+ *out = *in
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]PolicyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus.
+func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SubjectRulesReviewStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserRestriction) DeepCopyInto(out *UserRestriction) {
+ *out = *in
+ if in.Users != nil {
+ in, out := &in.Users, &out.Users
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
+ *out = make([]metav1.LabelSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRestriction.
+func (in *UserRestriction) DeepCopy() *UserRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(UserRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..4756252b9c
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,21 @@
+rolebindingrestrictions.authorization.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: rolebindingrestrictions.authorization.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_03"
+ GroupName: authorization.openshift.io
+ HasStatus: false
+ KindName: RoleBindingRestriction
+ Labels: {}
+ PluralName: rolebindingrestrictions
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..34777dc958
--- /dev/null
+++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,364 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Action = map[string]string{
+ "": "Action describes a request to the API server",
+ "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces",
+ "verb": "Verb is one of: get, list, watch, create, update, delete",
+ "resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined",
+ "resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined",
+ "resource": "Resource is one of the existing resource types",
+ "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"",
+ "path": "Path is the path of a non resource URL",
+ "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)",
+ "content": "Content is the actual content of the request for create and update",
+}
+
+func (Action) SwaggerDoc() map[string]string {
+ return map_Action
+}
+
+var map_ClusterRole = map[string]string{
+ "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "rules": "Rules holds all the PolicyRules for this ClusterRole",
+ "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+ return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+ "": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+ "": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of ClusterRoles",
+}
+
+func (ClusterRoleList) SwaggerDoc() map[string]string {
+ return map_ClusterRoleList
+}
+
+var map_GroupRestriction = map[string]string{
+ "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.",
+ "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.",
+ "labels": "Selectors specifies a list of label selectors over group labels.",
+}
+
+func (GroupRestriction) SwaggerDoc() map[string]string {
+ return map_GroupRestriction
+}
+
+var map_IsPersonalSubjectAccessReview = map[string]string{
+ "": "IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (IsPersonalSubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_IsPersonalSubjectAccessReview
+}
+
+var map_LocalResourceAccessReview = map[string]string{
+ "": "LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (LocalResourceAccessReview) SwaggerDoc() map[string]string {
+ return map_LocalResourceAccessReview
+}
+
+var map_LocalSubjectAccessReview = map[string]string{
+ "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
+ "groups": "Groups is optional. Groups is the list of groups to which the User belongs.",
+ "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
+}
+
+func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_LocalSubjectAccessReview
+}
+
+var map_NamedClusterRole = map[string]string{
+ "": "NamedClusterRole relates a name with a cluster role",
+ "name": "Name is the name of the cluster role",
+ "role": "Role is the cluster role being named",
+}
+
+func (NamedClusterRole) SwaggerDoc() map[string]string {
+ return map_NamedClusterRole
+}
+
+var map_NamedClusterRoleBinding = map[string]string{
+ "": "NamedClusterRoleBinding relates a name with a cluster role binding",
+ "name": "Name is the name of the cluster role binding",
+ "roleBinding": "RoleBinding is the cluster role binding being named",
+}
+
+func (NamedClusterRoleBinding) SwaggerDoc() map[string]string {
+ return map_NamedClusterRoleBinding
+}
+
+var map_NamedRole = map[string]string{
+ "": "NamedRole relates a Role with a name",
+ "name": "Name is the name of the role",
+ "role": "Role is the role being named",
+}
+
+func (NamedRole) SwaggerDoc() map[string]string {
+ return map_NamedRole
+}
+
+var map_NamedRoleBinding = map[string]string{
+ "": "NamedRoleBinding relates a role binding with a name",
+ "name": "Name is the name of the role binding",
+ "roleBinding": "RoleBinding is the role binding being named",
+}
+
+func (NamedRoleBinding) SwaggerDoc() map[string]string {
+ return map_NamedRoleBinding
+}
+
+var map_PolicyRule = map[string]string{
+ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+ "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.",
+ "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed",
+ "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
+ "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+ return map_PolicyRule
+}
+
+var map_ResourceAccessReview = map[string]string{
+ "": "ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (ResourceAccessReview) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReview
+}
+
+var map_ResourceAccessReviewResponse = map[string]string{
+ "": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "namespace": "Namespace is the namespace used for the access review",
+ "users": "UsersSlice is the list of users who can perform the action",
+ "groups": "GroupsSlice is the list of groups who can perform the action",
+ "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+}
+
+func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReviewResponse
+}
+
+var map_Role = map[string]string{
+ "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "rules": "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+ return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+ "": "RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
+}
+
+func (RoleBinding) SwaggerDoc() map[string]string {
+ return map_RoleBinding
+}
+
+var map_RoleBindingList = map[string]string{
+ "": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of RoleBindings",
+}
+
+func (RoleBindingList) SwaggerDoc() map[string]string {
+ return map_RoleBindingList
+}
+
+var map_RoleBindingRestriction = map[string]string{
+ "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the matcher.",
+}
+
+func (RoleBindingRestriction) SwaggerDoc() map[string]string {
+ return map_RoleBindingRestriction
+}
+
+var map_RoleBindingRestrictionList = map[string]string{
+ "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of RoleBindingRestriction objects.",
+}
+
+func (RoleBindingRestrictionList) SwaggerDoc() map[string]string {
+ return map_RoleBindingRestrictionList
+}
+
+var map_RoleBindingRestrictionSpec = map[string]string{
+ "": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.",
+ "userrestriction": "UserRestriction matches against user subjects.",
+ "grouprestriction": "GroupRestriction matches against group subjects.",
+ "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.",
+}
+
+func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string {
+ return map_RoleBindingRestrictionSpec
+}
+
+var map_RoleList = map[string]string{
+ "": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of Roles",
+}
+
+func (RoleList) SwaggerDoc() map[string]string {
+ return map_RoleList
+}
+
+var map_SelfSubjectRulesReview = map[string]string{
+ "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "spec": "Spec adds information about how to conduct the check",
+ "status": "Status is completed by the server to tell which permissions you have",
+}
+
+func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
+ return map_SelfSubjectRulesReview
+}
+
+var map_SelfSubjectRulesReviewSpec = map[string]string{
+ "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check",
+ "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".",
+}
+
+func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+ return map_SelfSubjectRulesReviewSpec
+}
+
+var map_ServiceAccountReference = map[string]string{
+ "": "ServiceAccountReference specifies a service account and namespace by their names.",
+ "name": "Name is the name of the service account.",
+ "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.",
+}
+
+func (ServiceAccountReference) SwaggerDoc() map[string]string {
+ return map_ServiceAccountReference
+}
+
+var map_ServiceAccountRestriction = map[string]string{
+ "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.",
+ "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.",
+ "namespaces": "Namespaces specifies a list of literal namespace names.",
+}
+
+func (ServiceAccountRestriction) SwaggerDoc() map[string]string {
+ return map_ServiceAccountRestriction
+}
+
+var map_SubjectAccessReview = map[string]string{
+ "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.",
+ "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.",
+ "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.",
+}
+
+func (SubjectAccessReview) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReview
+}
+
+var map_SubjectAccessReviewResponse = map[string]string{
+ "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "namespace": "Namespace is the namespace used for the access review",
+ "allowed": "Allowed is required. True if the action would be allowed, false otherwise.",
+ "reason": "Reason is optional. It indicates why a request was allowed or denied.",
+ "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+}
+
+func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReviewResponse
+}
+
+var map_SubjectRulesReview = map[string]string{
+ "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "spec": "Spec adds information about how to conduct the check",
+ "status": "Status is completed by the server to tell which permissions you have",
+}
+
+func (SubjectRulesReview) SwaggerDoc() map[string]string {
+ return map_SubjectRulesReview
+}
+
+var map_SubjectRulesReviewSpec = map[string]string{
+ "": "SubjectRulesReviewSpec adds information about how to conduct the check",
+ "user": "User is optional. At least one of User and Groups must be specified.",
+ "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.",
+ "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".",
+}
+
+func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+ return map_SubjectRulesReviewSpec
+}
+
+var map_SubjectRulesReviewStatus = map[string]string{
+ "": "SubjectRulesReviewStatus is contains the result of a rules check",
+ "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject",
+ "evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.",
+}
+
+func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string {
+ return map_SubjectRulesReviewStatus
+}
+
+var map_UserRestriction = map[string]string{
+ "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.",
+ "users": "Users specifies a list of literal user names.",
+ "groups": "Groups specifies a list of literal group names.",
+ "labels": "Selectors specifies a list of label selectors over user labels.",
+}
+
+func (UserRestriction) SwaggerDoc() map[string]string {
+ return map_UserRestriction
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/build/OWNERS b/vendor/github.com/openshift/api/build/OWNERS
new file mode 100644
index 0000000000..e6d19c798d
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/OWNERS
@@ -0,0 +1,7 @@
+reviewers:
+ - adambkaplan
+ - bparees
+ - sayan-biswas
+emeritus_reviewers:
+ - jim-minter
+ - gabemontero
diff --git a/vendor/github.com/openshift/api/build/install.go b/vendor/github.com/openshift/api/build/install.go
new file mode 100644
index 0000000000..87e2c26b0f
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/install.go
@@ -0,0 +1,26 @@
+package build
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ buildv1 "github.com/openshift/api/build/v1"
+)
+
+const (
+ GroupName = "build.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(buildv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go
new file mode 100644
index 0000000000..0d9c8f03b3
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/consts.go
@@ -0,0 +1,202 @@
+package v1
+
+// annotations
+const (
+ // BuildAnnotation is an annotation that identifies a Pod as being for a Build
+ BuildAnnotation = "openshift.io/build.name"
+
+ // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from
+ BuildConfigAnnotation = "openshift.io/build-config.name"
+
+ // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from
+ BuildCloneAnnotation = "openshift.io/build.clone-of"
+
+ // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build
+ BuildNumberAnnotation = "openshift.io/build.number"
+
+ // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build
+ BuildPodNameAnnotation = "openshift.io/build.pod-name"
+
+ // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information
+ BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json"
+
+ // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log
+ BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url"
+
+ // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrappering)
+ BuildJenkinsConsoleLogURLAnnotation = "openshift.io/jenkins-console-log-url"
+
+ // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin
+ BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url"
+
+ // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build
+ BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri"
+
+ // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used
+ BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-"
+
+ // BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused.
+ // New Builds cannot be instantiated from a paused BuildConfig.
+ BuildConfigPausedAnnotation = "openshift.io/build-config.paused"
+)
+
+// labels
+const (
+ // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig
+ // on which the Build is based. NOTE: The value for this label may not contain the entire
+ // BuildConfig name because it will be truncated to maximum label length.
+ BuildConfigLabel = "openshift.io/build-config.name"
+
+ // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run.
+ // NOTE: The value for this label may not contain the entire Build name because it will be
+ // truncated to maximum label length.
+ BuildLabel = "openshift.io/build.name"
+
+ // BuildRunPolicyLabel represents the start policy used to start the build.
+ BuildRunPolicyLabel = "openshift.io/build.start-policy"
+
+ // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces.
+ // We keep it for backward compatibility.
+ BuildConfigLabelDeprecated = "buildconfig"
+)
+
+const (
+ // StatusReasonError is a generic reason for a build error condition.
+ StatusReasonError StatusReason = "Error"
+
+ // StatusReasonCannotCreateBuildPodSpec is an error condition when the build
+ // strategy cannot create a build pod spec.
+ StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec"
+
+ // StatusReasonCannotCreateBuildPod is an error condition when a build pod
+ // cannot be created.
+ StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod"
+
+ // StatusReasonInvalidOutputReference is an error condition when the build
+ // output is an invalid reference.
+ StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference"
+
+ // StatusReasonInvalidImageReference is an error condition when the build
+ // references an invalid image.
+ StatusReasonInvalidImageReference StatusReason = "InvalidImageReference"
+
+ // StatusReasonCancelBuildFailed is an error condition when cancelling a build
+ // fails.
+ StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed"
+
+ // StatusReasonBuildPodDeleted is an error condition when the build pod is
+ // deleted before build completion.
+ StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted"
+
+ // StatusReasonExceededRetryTimeout is an error condition when the build has
+ // not completed and retrying the build times out.
+ StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout"
+
+ // StatusReasonMissingPushSecret indicates that the build is missing required
+ // secret for pushing the output image.
+ // The build will stay in the pending state until the secret is created, or the build times out.
+ StatusReasonMissingPushSecret StatusReason = "MissingPushSecret"
+
+ // StatusReasonPostCommitHookFailed indicates the post-commit hook failed.
+ StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed"
+
+ // StatusReasonPushImageToRegistryFailed indicates that an image failed to be
+ // pushed to the registry.
+ StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed"
+
+ // StatusReasonPullBuilderImageFailed indicates that we failed to pull the
+ // builder image.
+ StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed"
+
+ // StatusReasonFetchSourceFailed indicates that fetching the source of the
+ // build has failed.
+ StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed"
+
+ // StatusReasonFetchImageContentFailed indicates that the fetching of an image and extracting
+ // its contents for inclusion in the build has failed.
+ StatusReasonFetchImageContentFailed StatusReason = "FetchImageContentFailed"
+
+ // StatusReasonManageDockerfileFailed indicates that the set up of the Dockerfile for the build
+ // has failed.
+ StatusReasonManageDockerfileFailed StatusReason = "ManageDockerfileFailed"
+
+ // StatusReasonInvalidContextDirectory indicates that the supplied
+ // contextDir does not exist
+ StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory"
+
+ // StatusReasonCancelledBuild indicates that the build was cancelled by the
+ // user.
+ StatusReasonCancelledBuild StatusReason = "CancelledBuild"
+
+ // StatusReasonDockerBuildFailed indicates that the container image build strategy has
+ // failed.
+ StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed"
+
+ // StatusReasonBuildPodExists indicates that the build tried to create a
+ // build pod but one was already present.
+ StatusReasonBuildPodExists StatusReason = "BuildPodExists"
+
+ // StatusReasonNoBuildContainerStatus indicates that the build failed because the
+ // the build pod has no container statuses.
+ StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus"
+
+ // StatusReasonFailedContainer indicates that the pod for the build has at least
+ // one container with a non-zero exit status.
+ StatusReasonFailedContainer StatusReason = "FailedContainer"
+
+ // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing
+ // the supplied options for environment variables in the build strategy environment
+ StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable"
+
+ // StatusReasonGenericBuildFailed is the reason associated with a broad
+ // range of build failures.
+ StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed"
+
+ // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption
+ StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled"
+
+ // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure
+ // to look up the service account associated with the BuildConfig.
+ StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount"
+
+ // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted
+ // from its node
+ StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted"
+)
+
+// WhitelistEnvVarNames is a list of environment variable names that are allowed to be specified
+// in a buildconfig and merged into the created build pods, the code for this is located in
+// openshift/openshift-controller-manager
+var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "GIT_LFS_SKIP_SMUDGE", "LANG",
+ "HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY", "http_proxy", "https_proxy", "no_proxy"}
+
+// env vars
+const (
+
+ // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when
+ // performing a custom build, if needed.
+ CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE"
+
+ // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in
+ // Source builder images
+ AllowedUIDs = "ALLOWED_UIDS"
+ // DropCapabilities is an environment variable that contains a list of capabilities to drop when
+ // executing a Source build
+ DropCapabilities = "DROP_CAPS"
+)
+
+// keys inside of secrets and configmaps
+const (
+ // WebHookSecretKey is the key used to identify the value containing the webhook invocation
+ // secret within a secret referenced by a webhook trigger.
+ WebHookSecretKey = "WebHookSecretKey"
+
+ // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file.
+ RegistryConfKey = "registries.conf"
+
+ // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file.
+ SignaturePolicyKey = "policy.json"
+
+ // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods.
+ ServiceCAKey = "service-ca.crt"
+)
diff --git a/vendor/github.com/openshift/api/build/v1/doc.go b/vendor/github.com/openshift/api/build/v1/doc.go
new file mode 100644
index 0000000000..9bc16f64b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=build.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/build/v1/generated.pb.go b/vendor/github.com/openshift/api/build/v1/generated.pb.go
new file mode 100644
index 0000000000..1b026f354d
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/generated.pb.go
@@ -0,0 +1,17545 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/build/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} }
+func (*BinaryBuildRequestOptions) ProtoMessage() {}
+func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{0}
+}
+func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src)
+}
+func (m *BinaryBuildRequestOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo
+
+func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} }
+func (*BinaryBuildSource) ProtoMessage() {}
+func (*BinaryBuildSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{1}
+}
+func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BinaryBuildSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BinaryBuildSource.Merge(m, src)
+}
+func (m *BinaryBuildSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *BinaryBuildSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo
+
+func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} }
+func (*BitbucketWebHookCause) ProtoMessage() {}
+func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{2}
+}
+func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BitbucketWebHookCause.Merge(m, src)
+}
+func (m *BitbucketWebHookCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *BitbucketWebHookCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo
+
+func (m *Build) Reset() { *m = Build{} }
+func (*Build) ProtoMessage() {}
+func (*Build) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{3}
+}
+func (m *Build) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Build) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Build.Merge(m, src)
+}
+func (m *Build) XXX_Size() int {
+ return m.Size()
+}
+func (m *Build) XXX_DiscardUnknown() {
+ xxx_messageInfo_Build.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Build proto.InternalMessageInfo
+
+func (m *BuildCondition) Reset() { *m = BuildCondition{} }
+func (*BuildCondition) ProtoMessage() {}
+func (*BuildCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{4}
+}
+func (m *BuildCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildCondition.Merge(m, src)
+}
+func (m *BuildCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildCondition proto.InternalMessageInfo
+
+func (m *BuildConfig) Reset() { *m = BuildConfig{} }
+func (*BuildConfig) ProtoMessage() {}
+func (*BuildConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{5}
+}
+func (m *BuildConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildConfig.Merge(m, src)
+}
+func (m *BuildConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildConfig proto.InternalMessageInfo
+
+func (m *BuildConfigList) Reset() { *m = BuildConfigList{} }
+func (*BuildConfigList) ProtoMessage() {}
+func (*BuildConfigList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{6}
+}
+func (m *BuildConfigList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildConfigList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildConfigList.Merge(m, src)
+}
+func (m *BuildConfigList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildConfigList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildConfigList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo
+
+func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} }
+func (*BuildConfigSpec) ProtoMessage() {}
+func (*BuildConfigSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{7}
+}
+func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildConfigSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildConfigSpec.Merge(m, src)
+}
+func (m *BuildConfigSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildConfigSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo
+
+func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} }
+func (*BuildConfigStatus) ProtoMessage() {}
+func (*BuildConfigStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{8}
+}
+func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildConfigStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildConfigStatus.Merge(m, src)
+}
+func (m *BuildConfigStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildConfigStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo
+
+func (m *BuildList) Reset() { *m = BuildList{} }
+func (*BuildList) ProtoMessage() {}
+func (*BuildList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{9}
+}
+func (m *BuildList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildList.Merge(m, src)
+}
+func (m *BuildList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildList proto.InternalMessageInfo
+
+func (m *BuildLog) Reset() { *m = BuildLog{} }
+func (*BuildLog) ProtoMessage() {}
+func (*BuildLog) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{10}
+}
+func (m *BuildLog) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildLog) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildLog.Merge(m, src)
+}
+func (m *BuildLog) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildLog) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildLog.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildLog proto.InternalMessageInfo
+
+func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} }
+func (*BuildLogOptions) ProtoMessage() {}
+func (*BuildLogOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{11}
+}
+func (m *BuildLogOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildLogOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildLogOptions.Merge(m, src)
+}
+func (m *BuildLogOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildLogOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildLogOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo
+
+func (m *BuildOutput) Reset() { *m = BuildOutput{} }
+func (*BuildOutput) ProtoMessage() {}
+func (*BuildOutput) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{12}
+}
+func (m *BuildOutput) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildOutput) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildOutput.Merge(m, src)
+}
+func (m *BuildOutput) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildOutput) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildOutput.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildOutput proto.InternalMessageInfo
+
+func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} }
+func (*BuildPostCommitSpec) ProtoMessage() {}
+func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{13}
+}
+func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildPostCommitSpec.Merge(m, src)
+}
+func (m *BuildPostCommitSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildPostCommitSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo
+
+func (m *BuildRequest) Reset() { *m = BuildRequest{} }
+func (*BuildRequest) ProtoMessage() {}
+func (*BuildRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{14}
+}
+func (m *BuildRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildRequest.Merge(m, src)
+}
+func (m *BuildRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildRequest proto.InternalMessageInfo
+
+func (m *BuildSource) Reset() { *m = BuildSource{} }
+func (*BuildSource) ProtoMessage() {}
+func (*BuildSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{15}
+}
+func (m *BuildSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildSource.Merge(m, src)
+}
+func (m *BuildSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildSource proto.InternalMessageInfo
+
+func (m *BuildSpec) Reset() { *m = BuildSpec{} }
+func (*BuildSpec) ProtoMessage() {}
+func (*BuildSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{16}
+}
+func (m *BuildSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildSpec.Merge(m, src)
+}
+func (m *BuildSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildSpec proto.InternalMessageInfo
+
+func (m *BuildStatus) Reset() { *m = BuildStatus{} }
+func (*BuildStatus) ProtoMessage() {}
+func (*BuildStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{17}
+}
+func (m *BuildStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildStatus.Merge(m, src)
+}
+func (m *BuildStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildStatus proto.InternalMessageInfo
+
+func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} }
+func (*BuildStatusOutput) ProtoMessage() {}
+func (*BuildStatusOutput) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{18}
+}
+func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildStatusOutput) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildStatusOutput.Merge(m, src)
+}
+func (m *BuildStatusOutput) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildStatusOutput) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo
+
+func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} }
+func (*BuildStatusOutputTo) ProtoMessage() {}
+func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{19}
+}
+func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildStatusOutputTo.Merge(m, src)
+}
+func (m *BuildStatusOutputTo) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildStatusOutputTo) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo
+
+func (m *BuildStrategy) Reset() { *m = BuildStrategy{} }
+func (*BuildStrategy) ProtoMessage() {}
+func (*BuildStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{20}
+}
+func (m *BuildStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildStrategy.Merge(m, src)
+}
+func (m *BuildStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo
+
+func (m *BuildTriggerCause) Reset() { *m = BuildTriggerCause{} }
+func (*BuildTriggerCause) ProtoMessage() {}
+func (*BuildTriggerCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{21}
+}
+func (m *BuildTriggerCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildTriggerCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildTriggerCause.Merge(m, src)
+}
+func (m *BuildTriggerCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildTriggerCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo
+
+func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} }
+func (*BuildTriggerPolicy) ProtoMessage() {}
+func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{22}
+}
+func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildTriggerPolicy.Merge(m, src)
+}
+func (m *BuildTriggerPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildTriggerPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo
+
+func (m *BuildVolume) Reset() { *m = BuildVolume{} }
+func (*BuildVolume) ProtoMessage() {}
+func (*BuildVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{23}
+}
+func (m *BuildVolume) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildVolume.Merge(m, src)
+}
+func (m *BuildVolume) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildVolume proto.InternalMessageInfo
+
+func (m *BuildVolumeMount) Reset() { *m = BuildVolumeMount{} }
+func (*BuildVolumeMount) ProtoMessage() {}
+func (*BuildVolumeMount) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{24}
+}
+func (m *BuildVolumeMount) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildVolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildVolumeMount) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildVolumeMount.Merge(m, src)
+}
+func (m *BuildVolumeMount) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildVolumeMount) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildVolumeMount.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildVolumeMount proto.InternalMessageInfo
+
+func (m *BuildVolumeSource) Reset() { *m = BuildVolumeSource{} }
+func (*BuildVolumeSource) ProtoMessage() {}
+func (*BuildVolumeSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{25}
+}
+func (m *BuildVolumeSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BuildVolumeSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildVolumeSource.Merge(m, src)
+}
+func (m *BuildVolumeSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildVolumeSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildVolumeSource proto.InternalMessageInfo
+
+func (m *CommonSpec) Reset() { *m = CommonSpec{} }
+func (*CommonSpec) ProtoMessage() {}
+func (*CommonSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{26}
+}
+func (m *CommonSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CommonSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommonSpec.Merge(m, src)
+}
+func (m *CommonSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *CommonSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommonSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommonSpec proto.InternalMessageInfo
+
+func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} }
+func (*CommonWebHookCause) ProtoMessage() {}
+func (*CommonWebHookCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{27}
+}
+func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CommonWebHookCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CommonWebHookCause.Merge(m, src)
+}
+func (m *CommonWebHookCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *CommonWebHookCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo
+
+func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} }
+func (*ConfigMapBuildSource) ProtoMessage() {}
+func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{28}
+}
+func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigMapBuildSource.Merge(m, src)
+}
+func (m *ConfigMapBuildSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigMapBuildSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo
+
+func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} }
+func (*CustomBuildStrategy) ProtoMessage() {}
+func (*CustomBuildStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{29}
+}
+func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CustomBuildStrategy.Merge(m, src)
+}
+func (m *CustomBuildStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *CustomBuildStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo
+
+func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} }
+func (*DockerBuildStrategy) ProtoMessage() {}
+func (*DockerBuildStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{30}
+}
+func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DockerBuildStrategy.Merge(m, src)
+}
+func (m *DockerBuildStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *DockerBuildStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo
+
+func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} }
+func (*DockerStrategyOptions) ProtoMessage() {}
+func (*DockerStrategyOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{31}
+}
+func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DockerStrategyOptions.Merge(m, src)
+}
+func (m *DockerStrategyOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *DockerStrategyOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo
+
+func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} }
+func (*GenericWebHookCause) ProtoMessage() {}
+func (*GenericWebHookCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{32}
+}
+func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GenericWebHookCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenericWebHookCause.Merge(m, src)
+}
+func (m *GenericWebHookCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenericWebHookCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo
+
+func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} }
+func (*GenericWebHookEvent) ProtoMessage() {}
+func (*GenericWebHookEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{33}
+}
+func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenericWebHookEvent.Merge(m, src)
+}
+func (m *GenericWebHookEvent) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenericWebHookEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo
+
+func (m *GitBuildSource) Reset() { *m = GitBuildSource{} }
+func (*GitBuildSource) ProtoMessage() {}
+func (*GitBuildSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{34}
+}
+func (m *GitBuildSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitBuildSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitBuildSource.Merge(m, src)
+}
+func (m *GitBuildSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitBuildSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo
+
+func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} }
+func (*GitHubWebHookCause) ProtoMessage() {}
+func (*GitHubWebHookCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{35}
+}
+func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitHubWebHookCause.Merge(m, src)
+}
+func (m *GitHubWebHookCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitHubWebHookCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo
+
+func (m *GitInfo) Reset() { *m = GitInfo{} }
+func (*GitInfo) ProtoMessage() {}
+func (*GitInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{36}
+}
+func (m *GitInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitInfo.Merge(m, src)
+}
+func (m *GitInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitInfo proto.InternalMessageInfo
+
+func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} }
+func (*GitLabWebHookCause) ProtoMessage() {}
+func (*GitLabWebHookCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{37}
+}
+func (m *GitLabWebHookCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitLabWebHookCause.Merge(m, src)
+}
+func (m *GitLabWebHookCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitLabWebHookCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo
+
+func (m *GitRefInfo) Reset() { *m = GitRefInfo{} }
+func (*GitRefInfo) ProtoMessage() {}
+func (*GitRefInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{38}
+}
+func (m *GitRefInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitRefInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitRefInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitRefInfo.Merge(m, src)
+}
+func (m *GitRefInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitRefInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitRefInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitRefInfo proto.InternalMessageInfo
+
+func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} }
+func (*GitSourceRevision) ProtoMessage() {}
+func (*GitSourceRevision) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{39}
+}
+func (m *GitSourceRevision) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GitSourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GitSourceRevision) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GitSourceRevision.Merge(m, src)
+}
+func (m *GitSourceRevision) XXX_Size() int {
+ return m.Size()
+}
+func (m *GitSourceRevision) XXX_DiscardUnknown() {
+ xxx_messageInfo_GitSourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitSourceRevision proto.InternalMessageInfo
+
+func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} }
+func (*ImageChangeCause) ProtoMessage() {}
+func (*ImageChangeCause) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{40}
+}
+func (m *ImageChangeCause) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageChangeCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageChangeCause) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageChangeCause.Merge(m, src)
+}
+func (m *ImageChangeCause) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageChangeCause) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageChangeCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeCause proto.InternalMessageInfo
+
+func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} }
+func (*ImageChangeTrigger) ProtoMessage() {}
+func (*ImageChangeTrigger) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{41}
+}
+func (m *ImageChangeTrigger) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageChangeTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageChangeTrigger) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageChangeTrigger.Merge(m, src)
+}
+func (m *ImageChangeTrigger) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageChangeTrigger) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageChangeTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeTrigger proto.InternalMessageInfo
+
+func (m *ImageChangeTriggerStatus) Reset() { *m = ImageChangeTriggerStatus{} }
+func (*ImageChangeTriggerStatus) ProtoMessage() {}
+func (*ImageChangeTriggerStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{42}
+}
+func (m *ImageChangeTriggerStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageChangeTriggerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageChangeTriggerStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageChangeTriggerStatus.Merge(m, src)
+}
+func (m *ImageChangeTriggerStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageChangeTriggerStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageChangeTriggerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageChangeTriggerStatus proto.InternalMessageInfo
+
+func (m *ImageLabel) Reset() { *m = ImageLabel{} }
+func (*ImageLabel) ProtoMessage() {}
+func (*ImageLabel) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{43}
+}
+func (m *ImageLabel) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageLabel) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageLabel.Merge(m, src)
+}
+func (m *ImageLabel) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageLabel) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageLabel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageLabel proto.InternalMessageInfo
+
+func (m *ImageSource) Reset() { *m = ImageSource{} }
+func (*ImageSource) ProtoMessage() {}
+func (*ImageSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{44}
+}
+func (m *ImageSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageSource.Merge(m, src)
+}
+func (m *ImageSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageSource proto.InternalMessageInfo
+
+func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} }
+func (*ImageSourcePath) ProtoMessage() {}
+func (*ImageSourcePath) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{45}
+}
+func (m *ImageSourcePath) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageSourcePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageSourcePath) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageSourcePath.Merge(m, src)
+}
+func (m *ImageSourcePath) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageSourcePath) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageSourcePath.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageSourcePath proto.InternalMessageInfo
+
+func (m *ImageStreamTagReference) Reset() { *m = ImageStreamTagReference{} }
+func (*ImageStreamTagReference) ProtoMessage() {}
+func (*ImageStreamTagReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{46}
+}
+func (m *ImageStreamTagReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamTagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamTagReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamTagReference.Merge(m, src)
+}
+func (m *ImageStreamTagReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamTagReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamTagReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamTagReference proto.InternalMessageInfo
+
+func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} }
+func (*JenkinsPipelineBuildStrategy) ProtoMessage() {}
+func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{47}
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_JenkinsPipelineBuildStrategy.Merge(m, src)
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *JenkinsPipelineBuildStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_JenkinsPipelineBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JenkinsPipelineBuildStrategy proto.InternalMessageInfo
+
+func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} }
+func (*OptionalNodeSelector) ProtoMessage() {}
+func (*OptionalNodeSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{48}
+}
+func (m *OptionalNodeSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OptionalNodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OptionalNodeSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OptionalNodeSelector.Merge(m, src)
+}
+func (m *OptionalNodeSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *OptionalNodeSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_OptionalNodeSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalNodeSelector proto.InternalMessageInfo
+
+func (m *ProxyConfig) Reset() { *m = ProxyConfig{} }
+func (*ProxyConfig) ProtoMessage() {}
+func (*ProxyConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{49}
+}
+func (m *ProxyConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProxyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProxyConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProxyConfig.Merge(m, src)
+}
+func (m *ProxyConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProxyConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProxyConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProxyConfig proto.InternalMessageInfo
+
+func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} }
+func (*SecretBuildSource) ProtoMessage() {}
+func (*SecretBuildSource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{50}
+}
+func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretBuildSource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretBuildSource.Merge(m, src)
+}
+func (m *SecretBuildSource) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretBuildSource) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretBuildSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretBuildSource proto.InternalMessageInfo
+
+func (m *SecretLocalReference) Reset() { *m = SecretLocalReference{} }
+func (*SecretLocalReference) ProtoMessage() {}
+func (*SecretLocalReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{51}
+}
+func (m *SecretLocalReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretLocalReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretLocalReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretLocalReference.Merge(m, src)
+}
+func (m *SecretLocalReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretLocalReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretLocalReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretLocalReference proto.InternalMessageInfo
+
+func (m *SecretSpec) Reset() { *m = SecretSpec{} }
+func (*SecretSpec) ProtoMessage() {}
+func (*SecretSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{52}
+}
+func (m *SecretSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretSpec.Merge(m, src)
+}
+func (m *SecretSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretSpec proto.InternalMessageInfo
+
+func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} }
+func (*SourceBuildStrategy) ProtoMessage() {}
+func (*SourceBuildStrategy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{53}
+}
+func (m *SourceBuildStrategy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SourceBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SourceBuildStrategy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceBuildStrategy.Merge(m, src)
+}
+func (m *SourceBuildStrategy) XXX_Size() int {
+ return m.Size()
+}
+func (m *SourceBuildStrategy) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceBuildStrategy proto.InternalMessageInfo
+
+func (m *SourceControlUser) Reset() { *m = SourceControlUser{} }
+func (*SourceControlUser) ProtoMessage() {}
+func (*SourceControlUser) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{54}
+}
+func (m *SourceControlUser) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SourceControlUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SourceControlUser) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceControlUser.Merge(m, src)
+}
+func (m *SourceControlUser) XXX_Size() int {
+ return m.Size()
+}
+func (m *SourceControlUser) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceControlUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceControlUser proto.InternalMessageInfo
+
+func (m *SourceRevision) Reset() { *m = SourceRevision{} }
+func (*SourceRevision) ProtoMessage() {}
+func (*SourceRevision) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{55}
+}
+func (m *SourceRevision) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SourceRevision) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceRevision.Merge(m, src)
+}
+func (m *SourceRevision) XXX_Size() int {
+ return m.Size()
+}
+func (m *SourceRevision) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceRevision proto.InternalMessageInfo
+
+func (m *SourceStrategyOptions) Reset() { *m = SourceStrategyOptions{} }
+func (*SourceStrategyOptions) ProtoMessage() {}
+func (*SourceStrategyOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{56}
+}
+func (m *SourceStrategyOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SourceStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SourceStrategyOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceStrategyOptions.Merge(m, src)
+}
+func (m *SourceStrategyOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *SourceStrategyOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceStrategyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceStrategyOptions proto.InternalMessageInfo
+
+func (m *StageInfo) Reset() { *m = StageInfo{} }
+func (*StageInfo) ProtoMessage() {}
+func (*StageInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{57}
+}
+func (m *StageInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StageInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StageInfo.Merge(m, src)
+}
+func (m *StageInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *StageInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_StageInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StageInfo proto.InternalMessageInfo
+
+func (m *StepInfo) Reset() { *m = StepInfo{} }
+func (*StepInfo) ProtoMessage() {}
+func (*StepInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{58}
+}
+func (m *StepInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *StepInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StepInfo.Merge(m, src)
+}
+func (m *StepInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *StepInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_StepInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StepInfo proto.InternalMessageInfo
+
+func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} }
+func (*WebHookTrigger) ProtoMessage() {}
+func (*WebHookTrigger) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ba579f6f004cb75, []int{59}
+}
+func (m *WebHookTrigger) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WebHookTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WebHookTrigger) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WebHookTrigger.Merge(m, src)
+}
+func (m *WebHookTrigger) XXX_Size() int {
+ return m.Size()
+}
+func (m *WebHookTrigger) XXX_DiscardUnknown() {
+ xxx_messageInfo_WebHookTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WebHookTrigger proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.api.build.v1.BinaryBuildRequestOptions")
+ proto.RegisterType((*BinaryBuildSource)(nil), "github.com.openshift.api.build.v1.BinaryBuildSource")
+ proto.RegisterType((*BitbucketWebHookCause)(nil), "github.com.openshift.api.build.v1.BitbucketWebHookCause")
+ proto.RegisterType((*Build)(nil), "github.com.openshift.api.build.v1.Build")
+ proto.RegisterType((*BuildCondition)(nil), "github.com.openshift.api.build.v1.BuildCondition")
+ proto.RegisterType((*BuildConfig)(nil), "github.com.openshift.api.build.v1.BuildConfig")
+ proto.RegisterType((*BuildConfigList)(nil), "github.com.openshift.api.build.v1.BuildConfigList")
+ proto.RegisterType((*BuildConfigSpec)(nil), "github.com.openshift.api.build.v1.BuildConfigSpec")
+ proto.RegisterType((*BuildConfigStatus)(nil), "github.com.openshift.api.build.v1.BuildConfigStatus")
+ proto.RegisterType((*BuildList)(nil), "github.com.openshift.api.build.v1.BuildList")
+ proto.RegisterType((*BuildLog)(nil), "github.com.openshift.api.build.v1.BuildLog")
+ proto.RegisterType((*BuildLogOptions)(nil), "github.com.openshift.api.build.v1.BuildLogOptions")
+ proto.RegisterType((*BuildOutput)(nil), "github.com.openshift.api.build.v1.BuildOutput")
+ proto.RegisterType((*BuildPostCommitSpec)(nil), "github.com.openshift.api.build.v1.BuildPostCommitSpec")
+ proto.RegisterType((*BuildRequest)(nil), "github.com.openshift.api.build.v1.BuildRequest")
+ proto.RegisterType((*BuildSource)(nil), "github.com.openshift.api.build.v1.BuildSource")
+ proto.RegisterType((*BuildSpec)(nil), "github.com.openshift.api.build.v1.BuildSpec")
+ proto.RegisterType((*BuildStatus)(nil), "github.com.openshift.api.build.v1.BuildStatus")
+ proto.RegisterType((*BuildStatusOutput)(nil), "github.com.openshift.api.build.v1.BuildStatusOutput")
+ proto.RegisterType((*BuildStatusOutputTo)(nil), "github.com.openshift.api.build.v1.BuildStatusOutputTo")
+ proto.RegisterType((*BuildStrategy)(nil), "github.com.openshift.api.build.v1.BuildStrategy")
+ proto.RegisterType((*BuildTriggerCause)(nil), "github.com.openshift.api.build.v1.BuildTriggerCause")
+ proto.RegisterType((*BuildTriggerPolicy)(nil), "github.com.openshift.api.build.v1.BuildTriggerPolicy")
+ proto.RegisterType((*BuildVolume)(nil), "github.com.openshift.api.build.v1.BuildVolume")
+ proto.RegisterType((*BuildVolumeMount)(nil), "github.com.openshift.api.build.v1.BuildVolumeMount")
+ proto.RegisterType((*BuildVolumeSource)(nil), "github.com.openshift.api.build.v1.BuildVolumeSource")
+ proto.RegisterType((*CommonSpec)(nil), "github.com.openshift.api.build.v1.CommonSpec")
+ proto.RegisterType((*CommonWebHookCause)(nil), "github.com.openshift.api.build.v1.CommonWebHookCause")
+ proto.RegisterType((*ConfigMapBuildSource)(nil), "github.com.openshift.api.build.v1.ConfigMapBuildSource")
+ proto.RegisterType((*CustomBuildStrategy)(nil), "github.com.openshift.api.build.v1.CustomBuildStrategy")
+ proto.RegisterType((*DockerBuildStrategy)(nil), "github.com.openshift.api.build.v1.DockerBuildStrategy")
+ proto.RegisterType((*DockerStrategyOptions)(nil), "github.com.openshift.api.build.v1.DockerStrategyOptions")
+ proto.RegisterType((*GenericWebHookCause)(nil), "github.com.openshift.api.build.v1.GenericWebHookCause")
+ proto.RegisterType((*GenericWebHookEvent)(nil), "github.com.openshift.api.build.v1.GenericWebHookEvent")
+ proto.RegisterType((*GitBuildSource)(nil), "github.com.openshift.api.build.v1.GitBuildSource")
+ proto.RegisterType((*GitHubWebHookCause)(nil), "github.com.openshift.api.build.v1.GitHubWebHookCause")
+ proto.RegisterType((*GitInfo)(nil), "github.com.openshift.api.build.v1.GitInfo")
+ proto.RegisterType((*GitLabWebHookCause)(nil), "github.com.openshift.api.build.v1.GitLabWebHookCause")
+ proto.RegisterType((*GitRefInfo)(nil), "github.com.openshift.api.build.v1.GitRefInfo")
+ proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.api.build.v1.GitSourceRevision")
+ proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.api.build.v1.ImageChangeCause")
+ proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.api.build.v1.ImageChangeTrigger")
+ proto.RegisterType((*ImageChangeTriggerStatus)(nil), "github.com.openshift.api.build.v1.ImageChangeTriggerStatus")
+ proto.RegisterType((*ImageLabel)(nil), "github.com.openshift.api.build.v1.ImageLabel")
+ proto.RegisterType((*ImageSource)(nil), "github.com.openshift.api.build.v1.ImageSource")
+ proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.api.build.v1.ImageSourcePath")
+ proto.RegisterType((*ImageStreamTagReference)(nil), "github.com.openshift.api.build.v1.ImageStreamTagReference")
+ proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.api.build.v1.JenkinsPipelineBuildStrategy")
+ proto.RegisterType((*OptionalNodeSelector)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector.ItemsEntry")
+ proto.RegisterType((*ProxyConfig)(nil), "github.com.openshift.api.build.v1.ProxyConfig")
+ proto.RegisterType((*SecretBuildSource)(nil), "github.com.openshift.api.build.v1.SecretBuildSource")
+ proto.RegisterType((*SecretLocalReference)(nil), "github.com.openshift.api.build.v1.SecretLocalReference")
+ proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.api.build.v1.SecretSpec")
+ proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.api.build.v1.SourceBuildStrategy")
+ proto.RegisterType((*SourceControlUser)(nil), "github.com.openshift.api.build.v1.SourceControlUser")
+ proto.RegisterType((*SourceRevision)(nil), "github.com.openshift.api.build.v1.SourceRevision")
+ proto.RegisterType((*SourceStrategyOptions)(nil), "github.com.openshift.api.build.v1.SourceStrategyOptions")
+ proto.RegisterType((*StageInfo)(nil), "github.com.openshift.api.build.v1.StageInfo")
+ proto.RegisterType((*StepInfo)(nil), "github.com.openshift.api.build.v1.StepInfo")
+ proto.RegisterType((*WebHookTrigger)(nil), "github.com.openshift.api.build.v1.WebHookTrigger")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/build/v1/generated.proto", fileDescriptor_2ba579f6f004cb75)
+}
+
+var fileDescriptor_2ba579f6f004cb75 = []byte{
+ // 4386 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1c, 0x47,
+ 0x76, 0x56, 0xcf, 0x0f, 0x67, 0xe6, 0x0d, 0x45, 0x52, 0x45, 0xc9, 0x1a, 0x69, 0xb5, 0x1c, 0xb9,
+ 0x1d, 0x1b, 0x76, 0x6c, 0x0f, 0x57, 0xb2, 0xa4, 0xc8, 0x36, 0xe2, 0x80, 0x43, 0x52, 0x32, 0xb5,
+ 0x23, 0x89, 0xa8, 0xa1, 0x65, 0xef, 0x5a, 0xd8, 0xa4, 0xd9, 0x53, 0x33, 0x6c, 0x73, 0xa6, 0x7b,
+ 0xdc, 0xd5, 0x43, 0x9b, 0x0b, 0x04, 0x58, 0x04, 0x58, 0x24, 0xeb, 0xbd, 0x64, 0x2f, 0x8b, 0x24,
+ 0x97, 0x24, 0x58, 0xe4, 0x94, 0x53, 0x02, 0x04, 0xd8, 0x60, 0x2f, 0x01, 0xb2, 0x07, 0x1f, 0x12,
+ 0x60, 0x83, 0x04, 0x88, 0x81, 0x5d, 0x0c, 0x62, 0xe6, 0x10, 0x20, 0x87, 0x00, 0xb9, 0xea, 0x10,
+ 0x04, 0xf5, 0xd3, 0xdd, 0x55, 0x3d, 0x3d, 0x54, 0x0f, 0x25, 0x3b, 0x9b, 0xe4, 0xc6, 0xa9, 0xf7,
+ 0xde, 0xf7, 0xea, 0xe7, 0xd5, 0xab, 0xf7, 0x5e, 0x55, 0x13, 0xae, 0xf4, 0x9c, 0x60, 0x6f, 0xb4,
+ 0xdb, 0xb0, 0xbd, 0xc1, 0xaa, 0x37, 0x24, 0x2e, 0xdd, 0x73, 0xba, 0xc1, 0xaa, 0x35, 0x74, 0x56,
+ 0x77, 0x47, 0x4e, 0xbf, 0xb3, 0x7a, 0x70, 0x65, 0xb5, 0x47, 0x5c, 0xe2, 0x5b, 0x01, 0xe9, 0x34,
+ 0x86, 0xbe, 0x17, 0x78, 0xe8, 0xd9, 0x58, 0xa4, 0x11, 0x89, 0x34, 0xac, 0xa1, 0xd3, 0xe0, 0x22,
+ 0x8d, 0x83, 0x2b, 0x17, 0x5f, 0x55, 0x50, 0x7b, 0x5e, 0xcf, 0x5b, 0xe5, 0x92, 0xbb, 0xa3, 0x2e,
+ 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x04, 0xe2, 0x45, 0x73, 0xff, 0x26, 0x6d, 0x38, 0x1e, 0x57, 0x6b,
+ 0x7b, 0x3e, 0x49, 0xd1, 0x7a, 0xf1, 0x5a, 0xcc, 0x33, 0xb0, 0xec, 0x3d, 0xc7, 0x25, 0xfe, 0xe1,
+ 0xea, 0x70, 0xbf, 0xc7, 0x1a, 0xe8, 0xea, 0x80, 0x04, 0x56, 0x9a, 0xd4, 0x8d, 0x69, 0x52, 0xfe,
+ 0xc8, 0x0d, 0x9c, 0x01, 0x59, 0xa5, 0xf6, 0x1e, 0x19, 0x58, 0x49, 0x39, 0xf3, 0x6f, 0x0a, 0x70,
+ 0xa1, 0xe9, 0xb8, 0x96, 0x7f, 0xd8, 0x64, 0x63, 0xc2, 0xe4, 0xc3, 0x11, 0xa1, 0xc1, 0xfd, 0x61,
+ 0xe0, 0x78, 0x2e, 0x45, 0xbf, 0x05, 0x65, 0xa6, 0xb0, 0x63, 0x05, 0x56, 0xcd, 0xb8, 0x6c, 0xbc,
+ 0x58, 0xbd, 0xfa, 0xb5, 0x86, 0x50, 0xd4, 0x50, 0x15, 0x35, 0x86, 0xfb, 0x3d, 0xd6, 0x40, 0x1b,
+ 0x8c, 0xbb, 0x71, 0x70, 0xa5, 0x71, 0x7f, 0xf7, 0x03, 0x62, 0x07, 0x77, 0x49, 0x60, 0x35, 0xd1,
+ 0xa7, 0xe3, 0xfa, 0xa9, 0xa3, 0x71, 0x1d, 0xe2, 0x36, 0x1c, 0xa1, 0xa2, 0x17, 0x60, 0xce, 0xa2,
+ 0xb7, 0x9c, 0x3e, 0xa9, 0xe5, 0x2e, 0x1b, 0x2f, 0x56, 0x9a, 0x0b, 0x92, 0x7b, 0x6e, 0x8d, 0xb7,
+ 0x62, 0x49, 0x45, 0x37, 0x60, 0xc1, 0x27, 0x07, 0x0e, 0x75, 0x3c, 0x77, 0xdd, 0x1b, 0x0c, 0x9c,
+ 0xa0, 0x96, 0xd7, 0xf9, 0x45, 0x2b, 0x4e, 0x70, 0xa1, 0xd7, 0x61, 0x31, 0x6c, 0xb9, 0x4b, 0x28,
+ 0xb5, 0x7a, 0xa4, 0x56, 0xe0, 0x82, 0x8b, 0x52, 0xb0, 0x24, 0x9b, 0x71, 0x92, 0x0f, 0x35, 0x01,
+ 0x85, 0x4d, 0x6b, 0xa3, 0x60, 0xcf, 0xf3, 0xef, 0x59, 0x03, 0x52, 0x2b, 0x72, 0xe9, 0x68, 0x50,
+ 0x31, 0x05, 0xa7, 0x70, 0xa3, 0x4d, 0x58, 0xd6, 0x5b, 0x37, 0x07, 0x96, 0xd3, 0xaf, 0xcd, 0x71,
+ 0x90, 0x65, 0x09, 0x52, 0x55, 0x48, 0x38, 0x8d, 0x1f, 0x7d, 0x1d, 0xce, 0xe9, 0xe3, 0x0a, 0x88,
+ 0xe8, 0x4d, 0x89, 0x03, 0x9d, 0x93, 0x40, 0xa7, 0x35, 0x22, 0x4e, 0x97, 0x41, 0xf7, 0xe0, 0x99,
+ 0x09, 0x82, 0xe8, 0x56, 0x99, 0xa3, 0x3d, 0x23, 0xd1, 0x16, 0x74, 0x2a, 0x9e, 0x22, 0x65, 0xbe,
+ 0x09, 0x67, 0x14, 0x0b, 0x6a, 0x7b, 0x23, 0xdf, 0x26, 0xca, 0xba, 0x1a, 0xc7, 0xad, 0xab, 0xf9,
+ 0x89, 0x01, 0xe7, 0x9a, 0x4e, 0xb0, 0x3b, 0xb2, 0xf7, 0x49, 0xf0, 0x2e, 0xd9, 0x7d, 0xdb, 0xf3,
+ 0xf6, 0xd7, 0xad, 0x11, 0x25, 0xe8, 0x43, 0x00, 0xdb, 0x1b, 0x0c, 0x3c, 0xb7, 0x3d, 0x24, 0xb6,
+ 0xb4, 0xbe, 0xeb, 0x8d, 0xc7, 0x6e, 0xc9, 0xc6, 0x3a, 0x17, 0x52, 0xa1, 0x9a, 0x17, 0xa5, 0x72,
+ 0x34, 0x49, 0xc3, 0x8a, 0x12, 0xf3, 0x07, 0x39, 0x28, 0xf2, 0x41, 0x7c, 0x09, 0x86, 0x7f, 0x0f,
+ 0x0a, 0x94, 0x0d, 0x2c, 0xc7, 0xd1, 0x5f, 0xc9, 0x30, 0x30, 0x31, 0xbd, 0x43, 0x62, 0x37, 0xe7,
+ 0x25, 0x72, 0x81, 0xfd, 0xc2, 0x1c, 0x07, 0x3d, 0x80, 0x39, 0x1a, 0x58, 0xc1, 0x88, 0xf2, 0x8d,
+ 0x51, 0xbd, 0xda, 0xc8, 0x8c, 0xc8, 0xa5, 0xe2, 0x05, 0x12, 0xbf, 0xb1, 0x44, 0x33, 0xff, 0x3e,
+ 0x0f, 0x0b, 0x9c, 0x6f, 0xdd, 0x73, 0x3b, 0x0e, 0x73, 0x0b, 0xe8, 0x06, 0x14, 0x82, 0xc3, 0x61,
+ 0xb8, 0xb2, 0x66, 0xd8, 0x99, 0x9d, 0xc3, 0x21, 0x79, 0x34, 0xae, 0x23, 0x9d, 0x9b, 0xb5, 0x62,
+ 0xce, 0x8f, 0x5a, 0x51, 0x17, 0xc5, 0x5e, 0xbf, 0xa6, 0xab, 0x7c, 0x34, 0xae, 0xa7, 0xf8, 0xc7,
+ 0x46, 0x84, 0xa4, 0x77, 0x0c, 0x7d, 0x00, 0x0b, 0x7d, 0x8b, 0x06, 0xef, 0x0c, 0x3b, 0x56, 0x40,
+ 0x76, 0x9c, 0x01, 0xe1, 0xbb, 0xaa, 0x7a, 0xf5, 0x57, 0xb3, 0x2d, 0x14, 0x93, 0x88, 0x4d, 0xbd,
+ 0xa5, 0x21, 0xe1, 0x04, 0x32, 0x3a, 0x00, 0xc4, 0x5a, 0x76, 0x7c, 0xcb, 0xa5, 0x62, 0x54, 0x4c,
+ 0x5f, 0x7e, 0x66, 0x7d, 0x91, 0x21, 0xb6, 0x26, 0xd0, 0x70, 0x8a, 0x06, 0xb6, 0x8b, 0x7c, 0x62,
+ 0x51, 0xcf, 0x95, 0x4e, 0x2b, 0x5a, 0x24, 0xcc, 0x5b, 0xb1, 0xa4, 0xa2, 0x97, 0xa0, 0x34, 0x90,
+ 0xde, 0xad, 0x98, 0xee, 0xdd, 0x42, 0xba, 0xf9, 0xa3, 0x1c, 0x54, 0xc3, 0x15, 0xea, 0x3a, 0xbd,
+ 0x2f, 0xc1, 0xd2, 0x77, 0x34, 0x4b, 0xbf, 0x9a, 0xd5, 0x2e, 0x45, 0xff, 0xa6, 0xda, 0xfb, 0xc3,
+ 0x84, 0xbd, 0x5f, 0x9b, 0x11, 0xf7, 0x78, 0xab, 0xff, 0xa9, 0x01, 0x8b, 0x0a, 0x77, 0xcb, 0xa1,
+ 0x01, 0x7a, 0x38, 0x31, 0x53, 0x8d, 0x6c, 0x33, 0xc5, 0xa4, 0xf9, 0x3c, 0x2d, 0x49, 0x6d, 0xe5,
+ 0xb0, 0x45, 0x99, 0xa5, 0x36, 0x14, 0x9d, 0x80, 0x0c, 0xd8, 0xde, 0xc8, 0xcf, 0xb2, 0x7d, 0x45,
+ 0x07, 0x9b, 0xa7, 0x25, 0x74, 0x71, 0x8b, 0x81, 0x60, 0x81, 0x65, 0xfe, 0x22, 0xaf, 0x0d, 0x83,
+ 0x4d, 0x1f, 0xb2, 0xa1, 0x1c, 0xf8, 0x4e, 0xaf, 0x47, 0x7c, 0x5a, 0x33, 0xb8, 0xae, 0xeb, 0x59,
+ 0x75, 0xed, 0x08, 0xb9, 0x6d, 0xaf, 0xef, 0xd8, 0x87, 0xf1, 0x68, 0x64, 0x33, 0xc5, 0x11, 0x30,
+ 0x5a, 0x83, 0x8a, 0x3f, 0x72, 0x05, 0xa3, 0xdc, 0xed, 0xcf, 0x49, 0xf6, 0x0a, 0x0e, 0x09, 0x8f,
+ 0xc6, 0x75, 0xe1, 0x5a, 0xa2, 0x16, 0x1c, 0x4b, 0x21, 0x4b, 0xf3, 0xff, 0x62, 0x91, 0x5f, 0xcd,
+ 0xec, 0xff, 0xb9, 0xdd, 0x44, 0x76, 0x19, 0xb7, 0xa9, 0xfe, 0x1e, 0x75, 0xe0, 0x12, 0x1d, 0xd9,
+ 0x36, 0xa1, 0xb4, 0x3b, 0xea, 0xf3, 0x9e, 0xd0, 0xb7, 0x1d, 0x1a, 0x78, 0xfe, 0x61, 0xcb, 0x61,
+ 0x21, 0x06, 0xdb, 0x74, 0xc5, 0xe6, 0xe5, 0xa3, 0x71, 0xfd, 0x52, 0xfb, 0x18, 0x3e, 0x7c, 0x2c,
+ 0x0a, 0x7a, 0x0f, 0x6a, 0x5d, 0xcb, 0xe9, 0x93, 0x4e, 0x8a, 0x86, 0x22, 0xd7, 0x70, 0xe9, 0x68,
+ 0x5c, 0xaf, 0xdd, 0x9a, 0xc2, 0x83, 0xa7, 0x4a, 0x9b, 0xff, 0x6c, 0xc0, 0x99, 0x09, 0x9b, 0x46,
+ 0xd7, 0xa1, 0xca, 0x5c, 0xc9, 0x03, 0xe2, 0xb3, 0xc3, 0x9a, 0x9b, 0x6a, 0x3e, 0x8e, 0x35, 0x5a,
+ 0x31, 0x09, 0xab, 0x7c, 0xe8, 0x13, 0x03, 0x96, 0x9d, 0x81, 0xd5, 0x23, 0xeb, 0x7b, 0x96, 0xdb,
+ 0x23, 0xe1, 0xa2, 0x4a, 0x7b, 0x7c, 0x33, 0xc3, 0xcc, 0x6f, 0x4d, 0x48, 0xcb, 0x5d, 0xf6, 0x15,
+ 0xa9, 0x7c, 0x79, 0x92, 0x83, 0xe2, 0x34, 0xa5, 0xe6, 0x8f, 0x0d, 0xa8, 0xf0, 0x91, 0x7d, 0x09,
+ 0x3b, 0xef, 0xae, 0xbe, 0xf3, 0x5e, 0xcc, 0xba, 0x1b, 0xa6, 0xec, 0x39, 0x80, 0xb2, 0xe8, 0xb9,
+ 0xd7, 0x33, 0xff, 0xb3, 0x20, 0xf7, 0x5f, 0xcb, 0xeb, 0x85, 0x31, 0xf5, 0x2a, 0x54, 0x6c, 0xcf,
+ 0x0d, 0x2c, 0xd6, 0x65, 0x79, 0x84, 0x9e, 0x09, 0xb7, 0xc6, 0x7a, 0x48, 0xc0, 0x31, 0x0f, 0x3b,
+ 0x04, 0xba, 0x5e, 0xbf, 0xef, 0x7d, 0xc4, 0x37, 0x52, 0x39, 0xf6, 0x59, 0xb7, 0x78, 0x2b, 0x96,
+ 0x54, 0xf4, 0x0a, 0x94, 0x87, 0x2c, 0x44, 0xf3, 0xa4, 0x4f, 0x2c, 0xc7, 0xa3, 0xde, 0x96, 0xed,
+ 0x38, 0xe2, 0x40, 0xd7, 0x60, 0x9e, 0x3a, 0xae, 0x4d, 0xda, 0xc4, 0xf6, 0xdc, 0x0e, 0xe5, 0xb6,
+ 0x9e, 0x6f, 0x2e, 0x1d, 0x8d, 0xeb, 0xf3, 0x6d, 0xa5, 0x1d, 0x6b, 0x5c, 0xe8, 0x5d, 0xa8, 0xf0,
+ 0xdf, 0xfc, 0xfc, 0x2b, 0xce, 0x7c, 0xfe, 0x9d, 0x66, 0x83, 0x6c, 0x87, 0x00, 0x38, 0xc6, 0x42,
+ 0x57, 0x01, 0x58, 0x9a, 0x42, 0x03, 0x6b, 0x30, 0xa4, 0xfc, 0x24, 0x2f, 0xc7, 0xdb, 0x77, 0x27,
+ 0xa2, 0x60, 0x85, 0x0b, 0xbd, 0x0c, 0x95, 0xc0, 0x72, 0xfa, 0x2d, 0xc7, 0x25, 0x94, 0x47, 0xc2,
+ 0x79, 0xa1, 0x60, 0x27, 0x6c, 0xc4, 0x31, 0x1d, 0x35, 0x00, 0xfa, 0x6c, 0xd3, 0x34, 0x0f, 0x03,
+ 0x42, 0x79, 0xa4, 0x9b, 0x6f, 0x2e, 0x30, 0xf0, 0x56, 0xd4, 0x8a, 0x15, 0x0e, 0x36, 0xeb, 0xae,
+ 0xf7, 0x91, 0xe5, 0x04, 0xb5, 0x8a, 0x3e, 0xeb, 0xf7, 0xbc, 0x77, 0x2d, 0x27, 0xc0, 0x92, 0x8a,
+ 0x9e, 0x87, 0xd2, 0x81, 0xdc, 0x69, 0xc0, 0x41, 0xab, 0xec, 0xd8, 0x0d, 0x77, 0x58, 0x48, 0x43,
+ 0x7b, 0x70, 0xc9, 0x71, 0x29, 0xb1, 0x47, 0x3e, 0x69, 0xef, 0x3b, 0xc3, 0x9d, 0x56, 0xfb, 0x01,
+ 0xf1, 0x9d, 0xee, 0x61, 0xd3, 0xb2, 0xf7, 0x89, 0xdb, 0xa9, 0x55, 0xb9, 0x92, 0x5f, 0x91, 0x4a,
+ 0x2e, 0x6d, 0x1d, 0xc3, 0x8b, 0x8f, 0x45, 0x32, 0x3f, 0x09, 0x0f, 0xf8, 0xfb, 0xa3, 0x60, 0x38,
+ 0x0a, 0xd0, 0x9b, 0x90, 0x0b, 0x3c, 0xb9, 0x6d, 0x9e, 0x53, 0xd6, 0xaa, 0xc1, 0x02, 0xac, 0xf8,
+ 0x20, 0xc7, 0xa4, 0x4b, 0x7c, 0xe2, 0xda, 0xa4, 0x39, 0x77, 0x34, 0xae, 0xe7, 0x76, 0x3c, 0x9c,
+ 0x0b, 0x3c, 0xf4, 0x1e, 0xc0, 0x70, 0x44, 0xf7, 0xda, 0xc4, 0xf6, 0x49, 0x20, 0x4f, 0xf0, 0x17,
+ 0xd3, 0x40, 0x5a, 0x9e, 0x6d, 0xf5, 0x93, 0x48, 0x7c, 0x7e, 0xb7, 0x23, 0x79, 0xac, 0x60, 0xa1,
+ 0x0e, 0x54, 0xf9, 0xc6, 0x6f, 0x59, 0xbb, 0xa4, 0xcf, 0x0c, 0x36, 0x9f, 0xd1, 0xbf, 0x6f, 0x45,
+ 0x52, 0xb1, 0x53, 0x8b, 0xdb, 0x28, 0x56, 0x61, 0xcd, 0xdf, 0x31, 0x60, 0x99, 0x4f, 0xc6, 0xb6,
+ 0x47, 0x03, 0x91, 0xb7, 0x70, 0xcf, 0xff, 0x3c, 0x94, 0xd8, 0x39, 0x60, 0xb9, 0x1d, 0x7e, 0x06,
+ 0x56, 0xc4, 0xaa, 0xad, 0x8b, 0x26, 0x1c, 0xd2, 0xd0, 0x25, 0x28, 0x58, 0x7e, 0x4f, 0x78, 0x86,
+ 0x4a, 0xb3, 0xcc, 0x42, 0x90, 0x35, 0xbf, 0x47, 0x31, 0x6f, 0x65, 0x26, 0x42, 0x6d, 0xdf, 0x19,
+ 0x4e, 0xe4, 0xa2, 0x6d, 0xde, 0x8a, 0x25, 0xd5, 0xfc, 0x69, 0x09, 0xe6, 0xd5, 0xec, 0xfa, 0x4b,
+ 0x88, 0xb9, 0xde, 0x87, 0x72, 0x98, 0xad, 0xc9, 0x55, 0xbb, 0x92, 0x61, 0x6a, 0x45, 0xee, 0x86,
+ 0xa5, 0x60, 0x73, 0x9e, 0xb9, 0x8e, 0xf0, 0x17, 0x8e, 0x00, 0x11, 0x81, 0x25, 0x79, 0xd0, 0x93,
+ 0x4e, 0xf3, 0x90, 0xcf, 0xbd, 0x3c, 0x9f, 0x33, 0xd9, 0xd7, 0xd9, 0xa3, 0x71, 0x7d, 0x69, 0x27,
+ 0x01, 0x80, 0x27, 0x20, 0xd1, 0x1a, 0x14, 0xba, 0xbe, 0x37, 0xe0, 0x9e, 0x29, 0x23, 0x34, 0x5f,
+ 0xa1, 0x5b, 0xbe, 0x37, 0xc0, 0x5c, 0x14, 0xbd, 0x07, 0x73, 0xbb, 0x3c, 0x35, 0x95, 0xbe, 0x2a,
+ 0x53, 0x90, 0x98, 0xcc, 0x65, 0x9b, 0xc0, 0xd6, 0x54, 0x34, 0x63, 0x89, 0x87, 0xae, 0xe8, 0x87,
+ 0xec, 0x1c, 0xdf, 0xfa, 0x8b, 0xc7, 0x1e, 0xb0, 0xaf, 0x43, 0x9e, 0xb8, 0x07, 0xb5, 0x12, 0xb7,
+ 0xf4, 0x8b, 0x69, 0xc3, 0xd9, 0x74, 0x0f, 0x1e, 0x58, 0x7e, 0xb3, 0x2a, 0x97, 0x36, 0xbf, 0xe9,
+ 0x1e, 0x60, 0x26, 0x83, 0xf6, 0xa1, 0xaa, 0x4c, 0x4f, 0xad, 0xcc, 0x21, 0xae, 0xcd, 0x18, 0xb6,
+ 0x89, 0x5c, 0x38, 0xda, 0x33, 0xca, 0x0a, 0x60, 0x15, 0x1d, 0x7d, 0xcf, 0x80, 0x73, 0x1d, 0xcf,
+ 0xde, 0x67, 0xc7, 0xb7, 0x6f, 0x05, 0xa4, 0x77, 0x28, 0x8f, 0x2e, 0xee, 0x09, 0xab, 0x57, 0x6f,
+ 0x66, 0xd0, 0xbb, 0x91, 0x26, 0xdf, 0xbc, 0x70, 0x34, 0xae, 0x9f, 0x4b, 0x25, 0xe1, 0x74, 0x8d,
+ 0xbc, 0x2f, 0x94, 0xaf, 0x42, 0xb2, 0x2f, 0x90, 0xb9, 0x2f, 0xed, 0x34, 0x79, 0xd1, 0x97, 0x54,
+ 0x12, 0x4e, 0xd7, 0x68, 0xfe, 0x53, 0x51, 0x3a, 0x56, 0x59, 0xe2, 0x78, 0x4d, 0x4b, 0x83, 0xeb,
+ 0x89, 0x34, 0x78, 0x51, 0x61, 0x55, 0x72, 0xe0, 0xd8, 0x22, 0x73, 0x4f, 0xd9, 0x22, 0x1b, 0x00,
+ 0x62, 0x0e, 0xbb, 0x4e, 0x9f, 0x84, 0x1e, 0x89, 0x39, 0x88, 0x8d, 0xa8, 0x15, 0x2b, 0x1c, 0xa8,
+ 0x05, 0xf9, 0x9e, 0x8c, 0x71, 0xb3, 0x79, 0x87, 0xdb, 0x4e, 0xa0, 0xf6, 0xa1, 0xc4, 0x2c, 0xf4,
+ 0xb6, 0x13, 0x60, 0x06, 0x83, 0x1e, 0xc0, 0x1c, 0xf7, 0xbb, 0xb4, 0x56, 0xcc, 0x9c, 0xbf, 0xf0,
+ 0x6d, 0x2e, 0xd1, 0x22, 0xdf, 0xc9, 0x1b, 0x29, 0x96, 0x68, 0x2c, 0x2e, 0x60, 0x91, 0x10, 0xf9,
+ 0x38, 0xd8, 0x70, 0x7c, 0x59, 0x37, 0x53, 0xc2, 0xfa, 0x90, 0x82, 0x15, 0x2e, 0xf4, 0x2d, 0x98,
+ 0x97, 0x2b, 0x28, 0x8e, 0xad, 0xd2, 0x8c, 0xc7, 0x96, 0x08, 0x82, 0x14, 0x04, 0xac, 0xe1, 0xa1,
+ 0xdf, 0x84, 0x12, 0xe5, 0x7f, 0xd1, 0x19, 0x76, 0xa2, 0x90, 0x55, 0x27, 0x30, 0xca, 0xd1, 0x05,
+ 0x89, 0xe2, 0x10, 0x15, 0xed, 0xf3, 0x41, 0x77, 0x9d, 0xde, 0x5d, 0x6b, 0xc8, 0x76, 0x1d, 0xd3,
+ 0xf1, 0x6b, 0x99, 0x52, 0x1f, 0x29, 0xa4, 0xaa, 0x51, 0x67, 0x4b, 0x42, 0x62, 0x05, 0xde, 0xfc,
+ 0x79, 0x18, 0x6a, 0xf3, 0x83, 0xd1, 0x4a, 0xa9, 0xba, 0x3d, 0xe5, 0xac, 0x2b, 0xe1, 0xcc, 0x72,
+ 0x5f, 0xa4, 0x33, 0x33, 0xff, 0xa3, 0x14, 0x6e, 0x5a, 0x91, 0x1c, 0x5d, 0x81, 0xe2, 0x70, 0xcf,
+ 0xa2, 0xe1, 0xae, 0x0d, 0x33, 0x93, 0xe2, 0x36, 0x6b, 0x7c, 0x34, 0xae, 0x83, 0x88, 0x16, 0xd8,
+ 0x2f, 0x2c, 0x38, 0x79, 0xc0, 0x6e, 0xb9, 0x36, 0xe9, 0xf7, 0x49, 0x47, 0x86, 0xe0, 0x71, 0xc0,
+ 0x1e, 0x12, 0x70, 0xcc, 0x83, 0x6e, 0x44, 0x55, 0x1b, 0xb1, 0x0b, 0x57, 0xf4, 0xaa, 0xcd, 0x23,
+ 0x66, 0x5d, 0xa2, 0xdc, 0x30, 0xb5, 0x8a, 0x53, 0x38, 0xbe, 0x8a, 0x83, 0xba, 0xb0, 0x40, 0x03,
+ 0xcb, 0x0f, 0xa2, 0xc8, 0xf8, 0x04, 0xc1, 0x38, 0x3a, 0x1a, 0xd7, 0x17, 0xda, 0x1a, 0x0a, 0x4e,
+ 0xa0, 0xa2, 0x11, 0x2c, 0xdb, 0xde, 0x60, 0xd8, 0x27, 0x61, 0x49, 0x4a, 0x28, 0x9b, 0xbd, 0xd2,
+ 0x76, 0x9e, 0xa5, 0x7f, 0xeb, 0x93, 0x50, 0x38, 0x0d, 0x1f, 0xfd, 0x3a, 0x94, 0x3b, 0x23, 0xdf,
+ 0x62, 0x8d, 0x32, 0xb0, 0x7f, 0x36, 0x4c, 0x65, 0x36, 0x64, 0xfb, 0xa3, 0x71, 0xfd, 0x34, 0xcb,
+ 0x05, 0x1a, 0x61, 0x03, 0x8e, 0x44, 0xd0, 0x2e, 0x5c, 0xf4, 0x78, 0xf0, 0x2b, 0x5c, 0x9f, 0x08,
+ 0x30, 0xc2, 0xed, 0x2d, 0xab, 0xdc, 0x61, 0xd9, 0xf2, 0xe2, 0xfd, 0xa9, 0x9c, 0xf8, 0x18, 0x14,
+ 0x74, 0x1b, 0xe6, 0xc4, 0x26, 0x92, 0xa7, 0x62, 0xa6, 0xf8, 0x04, 0xc4, 0x4d, 0x05, 0x13, 0xc3,
+ 0x52, 0x1c, 0x3d, 0x84, 0x39, 0xa1, 0x46, 0x1e, 0x69, 0xd7, 0x66, 0x2b, 0xdc, 0x8a, 0xee, 0xc7,
+ 0xfe, 0x53, 0xfc, 0xc6, 0x12, 0x13, 0xed, 0xf0, 0x32, 0x19, 0xf3, 0xcb, 0x55, 0xbe, 0xcf, 0xb2,
+ 0x14, 0x9a, 0xdb, 0x4c, 0x60, 0xcb, 0xed, 0x7a, 0x5a, 0x79, 0x8c, 0x7b, 0x65, 0x81, 0xc5, 0xbc,
+ 0x72, 0xdf, 0xeb, 0xb5, 0x5d, 0x67, 0x38, 0x24, 0x41, 0x6d, 0x5e, 0xf7, 0xca, 0xad, 0x88, 0x82,
+ 0x15, 0x2e, 0x44, 0xb8, 0x53, 0x13, 0xa5, 0x5c, 0x5a, 0x3b, 0xcd, 0x7b, 0x73, 0x65, 0x86, 0x2a,
+ 0x97, 0x90, 0xd4, 0xdc, 0x99, 0x04, 0xc3, 0x0a, 0xb0, 0x69, 0xcb, 0x92, 0x88, 0x3a, 0x3b, 0xe8,
+ 0x9e, 0x92, 0x03, 0xdd, 0x38, 0xc9, 0xfc, 0xee, 0x78, 0x6a, 0x5a, 0x64, 0xb6, 0x64, 0x56, 0xa1,
+ 0xb3, 0xa0, 0xeb, 0x32, 0xa7, 0xd9, 0x70, 0x7a, 0x84, 0x06, 0xd2, 0xc5, 0xe8, 0x49, 0x8a, 0x20,
+ 0x61, 0x95, 0xcf, 0xfc, 0x49, 0x01, 0x4e, 0x4b, 0x38, 0x11, 0x71, 0xa0, 0xeb, 0x5a, 0x68, 0xf1,
+ 0x6c, 0x22, 0xb4, 0x38, 0xa3, 0x31, 0x2b, 0xc1, 0x85, 0x0f, 0x0b, 0x7a, 0x18, 0x25, 0x83, 0x8c,
+ 0x1b, 0x99, 0x23, 0x36, 0x0d, 0x59, 0x78, 0x08, 0x3d, 0x5e, 0xc3, 0x09, 0x0d, 0x4c, 0xa7, 0x1e,
+ 0x2e, 0xc9, 0x54, 0xe0, 0x46, 0xe6, 0xc8, 0x2c, 0x45, 0xa7, 0x1e, 0x97, 0xe1, 0x84, 0x06, 0xa6,
+ 0xd3, 0x1e, 0xd1, 0xc0, 0x1b, 0x44, 0x3a, 0x0b, 0x99, 0x75, 0xae, 0x73, 0xc1, 0x14, 0x9d, 0xeb,
+ 0x1a, 0x22, 0x4e, 0x68, 0x40, 0x3f, 0x34, 0xe0, 0xfc, 0x07, 0xc4, 0xdd, 0x77, 0x5c, 0xba, 0xed,
+ 0x0c, 0x49, 0xdf, 0x71, 0xe3, 0x11, 0x0b, 0xdf, 0xfb, 0x1b, 0x19, 0xb4, 0xdf, 0xd1, 0x11, 0xf4,
+ 0x6e, 0x7c, 0xe5, 0x68, 0x5c, 0x3f, 0x7f, 0x27, 0x5d, 0x07, 0x9e, 0xa6, 0xdc, 0xfc, 0x6e, 0x51,
+ 0x5a, 0xbc, 0x7a, 0x32, 0xaa, 0x67, 0x89, 0xf1, 0x98, 0xb3, 0xc4, 0x87, 0x05, 0x7e, 0x2b, 0xec,
+ 0xd8, 0xf2, 0x62, 0x6c, 0x06, 0xab, 0xb9, 0xad, 0x09, 0x8a, 0x43, 0x99, 0xcf, 0xa6, 0x4e, 0xc0,
+ 0x09, 0x0d, 0xc8, 0x85, 0xd3, 0x02, 0x3c, 0x54, 0x99, 0xcf, 0x7c, 0xbf, 0x77, 0xdb, 0x09, 0xde,
+ 0x8e, 0xe4, 0x84, 0xc6, 0x33, 0x47, 0xe3, 0xfa, 0x69, 0xad, 0x1d, 0xeb, 0xf0, 0x68, 0x04, 0x4b,
+ 0x4a, 0x99, 0x91, 0x4f, 0x97, 0xb4, 0x99, 0xd7, 0x66, 0x2b, 0x6c, 0x0a, 0x85, 0x3c, 0x85, 0xdd,
+ 0x4a, 0x00, 0xe2, 0x09, 0x15, 0x72, 0x98, 0x7d, 0x2b, 0x1a, 0x66, 0x71, 0x96, 0x61, 0xb6, 0xac,
+ 0xf4, 0x61, 0xc6, 0xed, 0x58, 0x87, 0x47, 0xdf, 0x86, 0xa5, 0xdd, 0xc4, 0x65, 0xaa, 0x3c, 0xab,
+ 0x6f, 0x66, 0xca, 0x33, 0x52, 0xee, 0x61, 0xc5, 0x58, 0x93, 0x24, 0x3c, 0xa1, 0xc7, 0xfc, 0x71,
+ 0x01, 0xd0, 0xe4, 0x2d, 0x01, 0xba, 0xa6, 0xb9, 0xb2, 0xcb, 0x09, 0x57, 0xb6, 0xa4, 0x4a, 0x28,
+ 0x9e, 0xec, 0x21, 0xcc, 0x89, 0xfe, 0xce, 0x50, 0xbd, 0x90, 0x1d, 0x91, 0x60, 0x69, 0x46, 0x21,
+ 0x31, 0x59, 0x00, 0x2f, 0xed, 0x51, 0xda, 0xdd, 0x09, 0xe0, 0xd3, 0xac, 0x3c, 0x44, 0x45, 0x7b,
+ 0xf2, 0x20, 0x10, 0xb6, 0x20, 0x2d, 0xed, 0xfa, 0x89, 0x4a, 0xe8, 0xa2, 0xa8, 0xa0, 0xb4, 0x63,
+ 0x15, 0x5a, 0x4e, 0x54, 0xdf, 0xda, 0x95, 0xa6, 0xf5, 0x04, 0x13, 0xa5, 0x98, 0x95, 0xc4, 0x44,
+ 0x04, 0x2a, 0xd1, 0x3a, 0x4b, 0x43, 0x3a, 0x81, 0x82, 0x74, 0x0b, 0x8a, 0x91, 0xcd, 0x7f, 0x37,
+ 0x64, 0x90, 0xfe, 0xc0, 0xeb, 0x8f, 0x06, 0x04, 0x5d, 0x86, 0x82, 0x6b, 0x0d, 0x42, 0x9b, 0x89,
+ 0x6e, 0xff, 0xf8, 0xa3, 0x06, 0x4e, 0xe1, 0xb7, 0x7f, 0xfc, 0x4c, 0x98, 0x25, 0x8d, 0x8e, 0x35,
+ 0x24, 0x93, 0x4e, 0x59, 0xf8, 0x92, 0x98, 0xe8, 0x7d, 0x98, 0x1b, 0x78, 0x23, 0x37, 0x08, 0xcb,
+ 0x92, 0xaf, 0xcd, 0x86, 0x7e, 0x97, 0xc9, 0xc6, 0xe0, 0xfc, 0x27, 0xc5, 0x12, 0xd2, 0x7c, 0x07,
+ 0x96, 0x92, 0xbc, 0x68, 0x0d, 0x16, 0x3b, 0x84, 0x06, 0x8e, 0xcb, 0xe3, 0xd7, 0x6d, 0x2b, 0xd8,
+ 0x93, 0x63, 0x3f, 0x2f, 0x41, 0x16, 0x37, 0x74, 0x32, 0x4e, 0xf2, 0x9b, 0x7f, 0x99, 0x93, 0xc7,
+ 0x80, 0x3a, 0x42, 0xf4, 0xba, 0xb6, 0xfb, 0x9e, 0x4f, 0xec, 0xbe, 0x73, 0x13, 0x02, 0xca, 0x16,
+ 0xbc, 0x03, 0x73, 0x54, 0x2d, 0xfb, 0xbe, 0x90, 0x16, 0xe0, 0x8a, 0xd4, 0x55, 0x9b, 0x54, 0x1e,
+ 0xe3, 0xca, 0xbc, 0x59, 0x22, 0xa0, 0x07, 0xfc, 0xce, 0x43, 0x64, 0x9c, 0x72, 0xcb, 0xbd, 0x94,
+ 0x06, 0x17, 0xa5, 0xa8, 0x1a, 0xe2, 0x69, 0x79, 0x35, 0x22, 0x48, 0x38, 0x86, 0x42, 0x6f, 0x41,
+ 0xde, 0xa6, 0xce, 0x71, 0x15, 0xc2, 0xf5, 0xf6, 0x96, 0x86, 0xc5, 0xab, 0x16, 0xeb, 0xed, 0x2d,
+ 0xcc, 0x04, 0xcd, 0xdf, 0x2b, 0x81, 0x92, 0xa5, 0xa2, 0xb7, 0x60, 0x81, 0x12, 0xff, 0xc0, 0xb1,
+ 0xc9, 0x9a, 0x6d, 0xb3, 0x85, 0x91, 0xf3, 0x16, 0x3d, 0x13, 0x68, 0x6b, 0x54, 0x9c, 0xe0, 0xe6,
+ 0x6f, 0x30, 0x54, 0xab, 0xcc, 0xfe, 0x06, 0xe3, 0x71, 0xf6, 0x18, 0x57, 0x73, 0xf3, 0x4f, 0xbb,
+ 0x9a, 0xfb, 0x2d, 0x28, 0x53, 0x3d, 0x8c, 0xfa, 0x5a, 0xf6, 0x08, 0x59, 0x46, 0x2e, 0xd1, 0x45,
+ 0x53, 0x14, 0xae, 0x44, 0x98, 0x6c, 0x52, 0x64, 0x7e, 0x53, 0x9c, 0x6d, 0x52, 0x1e, 0x93, 0xd9,
+ 0x7c, 0x03, 0x2a, 0x3e, 0x11, 0x13, 0x44, 0xa5, 0x6f, 0x4a, 0x2d, 0xf1, 0x60, 0xc9, 0x84, 0xc9,
+ 0x87, 0x23, 0xc7, 0x27, 0x03, 0xe2, 0x06, 0x34, 0x4e, 0xe0, 0x43, 0x2a, 0xc5, 0x31, 0x1a, 0xfa,
+ 0x00, 0x60, 0x18, 0xdd, 0x17, 0xc8, 0xf2, 0x51, 0xe6, 0xb4, 0x41, 0xbf, 0x69, 0x88, 0xf3, 0x95,
+ 0xb8, 0x1d, 0x2b, 0xe8, 0xe8, 0x7d, 0xb8, 0x10, 0x67, 0xc0, 0x1b, 0xc4, 0xea, 0xf0, 0xe0, 0x4e,
+ 0x5e, 0xca, 0x89, 0x6b, 0xaa, 0xaf, 0x1e, 0x8d, 0xeb, 0x17, 0xd6, 0xa7, 0x31, 0xe1, 0xe9, 0xf2,
+ 0x68, 0x00, 0xf3, 0xae, 0xd7, 0x21, 0x6d, 0xd2, 0x27, 0x76, 0xe0, 0xf9, 0x32, 0x55, 0xcd, 0x52,
+ 0x4a, 0x12, 0x45, 0x4f, 0xab, 0x7f, 0x4f, 0x11, 0x17, 0x85, 0x31, 0xb5, 0x05, 0x6b, 0xf0, 0xe8,
+ 0x0d, 0x58, 0xe0, 0x4e, 0x6e, 0xc7, 0x1f, 0xd1, 0x80, 0x74, 0xd6, 0xd7, 0x78, 0x4a, 0x5b, 0x16,
+ 0x67, 0xe5, 0x5d, 0x8d, 0x82, 0x13, 0x9c, 0xe6, 0x1f, 0x1a, 0x90, 0xf2, 0x3c, 0x4b, 0x33, 0x7d,
+ 0xe3, 0x69, 0x9b, 0xfe, 0x0b, 0x9a, 0x8b, 0x53, 0x2f, 0x70, 0x34, 0xf7, 0x65, 0xfe, 0x85, 0x01,
+ 0x67, 0xd3, 0x6a, 0x6b, 0xcc, 0x06, 0x63, 0xbf, 0x66, 0xcc, 0x58, 0x66, 0x54, 0x6f, 0x7d, 0xd3,
+ 0x5c, 0xdb, 0x82, 0xe2, 0xe2, 0x37, 0x1c, 0x5f, 0xf6, 0x31, 0xf2, 0x45, 0x1b, 0x1a, 0x15, 0x27,
+ 0xb8, 0xcd, 0xef, 0x17, 0x60, 0x39, 0x25, 0xd7, 0x41, 0x9b, 0xf2, 0x56, 0x65, 0x86, 0x0b, 0xc1,
+ 0xe8, 0x00, 0xd6, 0x6e, 0x56, 0x60, 0x38, 0xea, 0xf7, 0x9f, 0xec, 0x62, 0x30, 0x94, 0xc7, 0x0a,
+ 0x56, 0x78, 0x4d, 0x92, 0x3f, 0xc1, 0x35, 0xc9, 0x1d, 0x40, 0xe4, 0xe3, 0xa1, 0x47, 0x89, 0xcc,
+ 0x59, 0x3d, 0x1e, 0xb7, 0x14, 0xb8, 0x0d, 0x46, 0x4f, 0xaf, 0x36, 0x27, 0x38, 0x70, 0x8a, 0x14,
+ 0x5a, 0x85, 0x4a, 0xd7, 0xf3, 0x6d, 0xc2, 0x7a, 0xc9, 0x3d, 0x97, 0x52, 0xf5, 0xbb, 0x15, 0x12,
+ 0x70, 0xcc, 0x83, 0xde, 0x8b, 0xab, 0xc2, 0x73, 0x99, 0x2f, 0x33, 0xc5, 0x98, 0xb9, 0xa3, 0x98,
+ 0x5e, 0x0e, 0x5e, 0x83, 0x45, 0x2e, 0xb0, 0xb6, 0xbd, 0x15, 0xde, 0x37, 0x95, 0xf4, 0xe8, 0xa0,
+ 0xa9, 0x93, 0x71, 0x92, 0xdf, 0xfc, 0x51, 0x11, 0x96, 0x53, 0x32, 0xfc, 0xe8, 0x8e, 0xcd, 0x78,
+ 0x92, 0x3b, 0xb6, 0x2f, 0xca, 0x12, 0x5e, 0x82, 0x92, 0xeb, 0xad, 0x5b, 0xf6, 0x1e, 0x91, 0xef,
+ 0x19, 0xa2, 0x29, 0xba, 0x27, 0x9a, 0x71, 0x48, 0x0f, 0x8d, 0xa6, 0x70, 0x02, 0xa3, 0x99, 0x79,
+ 0xa1, 0xdf, 0x0a, 0xab, 0x2c, 0x5d, 0xa7, 0x4f, 0x78, 0xac, 0x36, 0x97, 0xd8, 0x99, 0x1a, 0x15,
+ 0x27, 0xb8, 0xd1, 0xd7, 0xa1, 0x22, 0x96, 0xc7, 0xef, 0xd1, 0x0c, 0xb7, 0x81, 0x51, 0x67, 0x9a,
+ 0xa1, 0x10, 0x8e, 0xe5, 0xd1, 0x10, 0xce, 0xf3, 0x74, 0x80, 0xf9, 0xeb, 0x81, 0xf3, 0x6d, 0x11,
+ 0x0f, 0x8a, 0x67, 0x57, 0xa2, 0xce, 0x79, 0xe3, 0x68, 0x5c, 0x3f, 0xbf, 0x95, 0xce, 0xf2, 0x68,
+ 0x3a, 0x09, 0x4f, 0x83, 0x45, 0xdf, 0x80, 0xd2, 0x01, 0x8f, 0xa8, 0xc2, 0x9b, 0x89, 0xc6, 0x6c,
+ 0xd1, 0x71, 0xbc, 0x8a, 0xe2, 0x37, 0xc5, 0x21, 0x9e, 0xf9, 0x7d, 0x03, 0xd2, 0xaf, 0x07, 0xf5,
+ 0x39, 0x33, 0x9e, 0x70, 0xce, 0x9e, 0x8f, 0xed, 0x4a, 0x94, 0xf3, 0xab, 0x69, 0x36, 0x65, 0xfe,
+ 0x91, 0x01, 0xcb, 0x29, 0xf5, 0x8d, 0x5f, 0x8e, 0x23, 0xe9, 0xb3, 0x5c, 0xb2, 0x73, 0x9b, 0x07,
+ 0xc4, 0x0d, 0x4e, 0x76, 0x29, 0xb9, 0x29, 0xae, 0x02, 0x73, 0xb2, 0xaa, 0x9f, 0xa9, 0x38, 0xc1,
+ 0xeb, 0xc3, 0xfa, 0x1d, 0xe0, 0x13, 0x78, 0xee, 0xe9, 0x77, 0xce, 0x85, 0x2f, 0xfb, 0xce, 0xd9,
+ 0xfc, 0x2b, 0x03, 0x16, 0xf4, 0xbb, 0x4e, 0xf4, 0x55, 0xc8, 0x8f, 0x7c, 0x47, 0x4e, 0x6a, 0xd4,
+ 0xfb, 0x77, 0xf0, 0x16, 0x66, 0xed, 0x8c, 0xec, 0x93, 0xae, 0x5c, 0xb1, 0x88, 0x8c, 0x49, 0x17,
+ 0xb3, 0x76, 0x44, 0xa0, 0x3a, 0xf4, 0xbd, 0x8f, 0x0f, 0xc5, 0x39, 0x3f, 0xc3, 0xfb, 0xec, 0xed,
+ 0x58, 0x2a, 0x2e, 0x23, 0x2b, 0x8d, 0x58, 0xc5, 0xe5, 0x11, 0xd4, 0x64, 0x71, 0xec, 0x97, 0xc3,
+ 0x5c, 0xff, 0x2e, 0x07, 0x25, 0x69, 0x34, 0xe8, 0x43, 0x58, 0xe8, 0x69, 0xd3, 0x3b, 0x43, 0xb7,
+ 0x12, 0x77, 0xd0, 0x91, 0xcb, 0xd5, 0xdb, 0x71, 0x42, 0x01, 0xfa, 0x6d, 0x38, 0xd3, 0x73, 0x02,
+ 0x7d, 0x4c, 0x33, 0x54, 0x0e, 0x6e, 0x27, 0x65, 0x9b, 0x17, 0xa4, 0xe2, 0x33, 0x13, 0x24, 0x3c,
+ 0xa9, 0x09, 0xdd, 0x87, 0x82, 0x4f, 0xba, 0xb3, 0x3c, 0x72, 0x62, 0x7b, 0x8a, 0x74, 0xf9, 0x1e,
+ 0x8b, 0xa2, 0x2f, 0x4c, 0xba, 0x14, 0x73, 0x20, 0xf3, 0x77, 0xc5, 0x52, 0x27, 0x0a, 0x84, 0xff,
+ 0x13, 0x9f, 0x4c, 0xfc, 0x97, 0x01, 0x10, 0x77, 0xf6, 0xff, 0xdf, 0xda, 0x9a, 0x7f, 0x9e, 0x83,
+ 0x49, 0x46, 0xb6, 0x2f, 0x6c, 0x91, 0x3d, 0x1a, 0xa9, 0x9f, 0x29, 0x49, 0x2a, 0x7a, 0x08, 0x73,
+ 0x16, 0xff, 0xce, 0x67, 0x86, 0x1e, 0x0b, 0x55, 0xeb, 0x9e, 0x1b, 0xf8, 0x5e, 0xff, 0x1d, 0x4a,
+ 0x7c, 0xe5, 0xe3, 0x1a, 0x8e, 0x85, 0x25, 0x26, 0x22, 0x2c, 0x3d, 0x91, 0xdf, 0xea, 0xcc, 0xf0,
+ 0x4c, 0x7e, 0x52, 0x81, 0x92, 0xaa, 0x48, 0x38, 0x1c, 0x23, 0xcf, 0x70, 0x6f, 0x6d, 0x7e, 0xcf,
+ 0x80, 0xa5, 0x64, 0x35, 0x9d, 0xc9, 0xf3, 0x60, 0x63, 0x6b, 0x23, 0x79, 0x57, 0xb1, 0x25, 0x9a,
+ 0x71, 0x48, 0x47, 0x77, 0xa0, 0xc4, 0x82, 0x4e, 0x2c, 0xbd, 0x6d, 0xc6, 0x90, 0x95, 0x9f, 0xef,
+ 0xb7, 0x84, 0x1c, 0x0e, 0x01, 0xcc, 0x7f, 0x30, 0x00, 0x4d, 0xd6, 0x5b, 0xd1, 0x36, 0x9c, 0x15,
+ 0x5f, 0x62, 0xc8, 0x47, 0x04, 0x5b, 0x5a, 0xd7, 0x2e, 0xc9, 0xae, 0x9d, 0x6d, 0xa5, 0xf0, 0xe0,
+ 0x54, 0xc9, 0x28, 0xc8, 0xce, 0x9d, 0x3c, 0xc8, 0x7e, 0x01, 0xe6, 0x86, 0x6c, 0xae, 0x3a, 0x32,
+ 0x12, 0x8e, 0x56, 0x7c, 0x9b, 0xb7, 0x62, 0x49, 0x35, 0xff, 0x3a, 0x07, 0xb5, 0x69, 0xcf, 0xb0,
+ 0xbf, 0x80, 0x91, 0x3d, 0xd4, 0x46, 0xf6, 0x46, 0xe6, 0x37, 0x3f, 0x81, 0x4f, 0xac, 0xc1, 0x8e,
+ 0xd5, 0x3b, 0x3e, 0xc7, 0x1c, 0xc0, 0xa2, 0xa2, 0xf5, 0x84, 0x9f, 0xdc, 0x44, 0x39, 0x52, 0x4b,
+ 0x87, 0xc2, 0x49, 0x6c, 0xb3, 0x0d, 0x10, 0xbf, 0x23, 0xcd, 0x50, 0x83, 0x7e, 0x0e, 0x8a, 0x07,
+ 0x56, 0x7f, 0x14, 0x7e, 0xb9, 0x18, 0xbd, 0x06, 0x7f, 0xc0, 0x1a, 0xb1, 0xa0, 0x99, 0x7f, 0x9c,
+ 0x83, 0xaa, 0xf2, 0xce, 0xe9, 0x69, 0xa5, 0xdf, 0xcf, 0x40, 0xce, 0xa2, 0x3c, 0xdd, 0xa9, 0x88,
+ 0x8b, 0xe9, 0x35, 0x8a, 0x73, 0x16, 0x45, 0xef, 0x42, 0x71, 0x68, 0x05, 0x7b, 0xe1, 0x5b, 0xf6,
+ 0xab, 0xb3, 0xbd, 0xc2, 0x62, 0xe9, 0x49, 0x3c, 0x0e, 0xf6, 0x8b, 0x62, 0x81, 0x97, 0xc8, 0xf2,
+ 0xf2, 0x4f, 0x2f, 0xcb, 0x33, 0xbf, 0x6b, 0xc0, 0x62, 0xa2, 0x0f, 0xe8, 0x2a, 0x00, 0x8d, 0x7e,
+ 0xc9, 0x25, 0x88, 0x0a, 0x69, 0x31, 0x1f, 0x56, 0xb8, 0x9e, 0xb8, 0x60, 0xd2, 0x87, 0xf3, 0x53,
+ 0x8c, 0x93, 0xa5, 0x88, 0x6c, 0xc5, 0xe9, 0xd0, 0xb2, 0x49, 0xf2, 0xc9, 0xfe, 0xbd, 0x90, 0x80,
+ 0x63, 0x9e, 0xc8, 0x78, 0x72, 0xd3, 0x8c, 0xc7, 0xfc, 0x47, 0x03, 0x2e, 0x1d, 0x77, 0x19, 0xcc,
+ 0x92, 0x7e, 0x79, 0xe3, 0x1b, 0xa5, 0x99, 0x89, 0x2b, 0x81, 0x3b, 0x3a, 0x19, 0x27, 0xf9, 0xd1,
+ 0x75, 0xa8, 0x2a, 0x4d, 0xb2, 0x33, 0x51, 0x1c, 0xa9, 0x88, 0x63, 0x95, 0xef, 0x09, 0xc2, 0x78,
+ 0xf3, 0x6f, 0x0d, 0x38, 0x9b, 0x56, 0x39, 0x44, 0xbd, 0xf0, 0x1b, 0x0b, 0x91, 0xbb, 0x35, 0x4f,
+ 0x58, 0x81, 0x6c, 0xf0, 0x2f, 0x2d, 0x36, 0xdd, 0xc0, 0x3f, 0x4c, 0xff, 0xfa, 0xe2, 0xe2, 0x4d,
+ 0x80, 0x98, 0x07, 0x2d, 0x41, 0x7e, 0x9f, 0x1c, 0x8a, 0x89, 0xc3, 0xec, 0x4f, 0x74, 0x56, 0xdb,
+ 0xb4, 0x72, 0x97, 0xbe, 0x91, 0xbb, 0x69, 0xbc, 0x51, 0xfe, 0x83, 0x3f, 0xa9, 0x9f, 0xfa, 0xce,
+ 0x2f, 0x2e, 0x9f, 0x32, 0x7f, 0x60, 0x80, 0x1a, 0x65, 0xa3, 0x97, 0xa1, 0xb2, 0x17, 0x04, 0x43,
+ 0xde, 0x24, 0x9f, 0x74, 0xf1, 0x2b, 0x89, 0xb7, 0x77, 0x76, 0xb6, 0x79, 0x23, 0x8e, 0xe9, 0xa8,
+ 0x01, 0xc0, 0x7e, 0x50, 0xc1, 0x5d, 0x88, 0x9f, 0x61, 0x32, 0xee, 0xb6, 0x60, 0x57, 0x38, 0x44,
+ 0x32, 0x2a, 0x98, 0xc5, 0xa7, 0x7b, 0x32, 0x19, 0x15, 0x9c, 0x21, 0xcd, 0xfc, 0x33, 0x03, 0xce,
+ 0x4c, 0x3c, 0x21, 0x44, 0xdb, 0x51, 0xf8, 0x3d, 0x6b, 0xf1, 0x71, 0x4a, 0xa0, 0xfe, 0xc4, 0xbb,
+ 0xe8, 0x26, 0x9c, 0x15, 0x88, 0x5c, 0x6b, 0xbc, 0x85, 0x1e, 0xeb, 0x4e, 0xcd, 0x3f, 0x35, 0x00,
+ 0xe2, 0x72, 0x18, 0xda, 0x85, 0x79, 0xd1, 0x25, 0x2d, 0x8e, 0xcc, 0x3e, 0xc0, 0xb3, 0x52, 0xc5,
+ 0x7c, 0x5b, 0x41, 0xc1, 0x1a, 0x26, 0xdb, 0xd7, 0xbc, 0x0a, 0xcd, 0x77, 0x57, 0x4e, 0xdf, 0xd7,
+ 0x77, 0x43, 0x02, 0x8e, 0x79, 0xcc, 0x9f, 0xe7, 0x61, 0x39, 0xe5, 0xd1, 0xca, 0xff, 0xe9, 0xa2,
+ 0xea, 0x4b, 0x50, 0x12, 0xdf, 0x31, 0xd0, 0x64, 0x74, 0x27, 0x3e, 0x73, 0xa0, 0x38, 0xa4, 0xa3,
+ 0x2b, 0x50, 0x75, 0x5c, 0x5b, 0xdc, 0xb1, 0x58, 0x61, 0x31, 0x4d, 0xdc, 0x5f, 0xc7, 0xcd, 0x58,
+ 0xe5, 0xd1, 0xab, 0x6f, 0x73, 0x19, 0xaa, 0x6f, 0x5f, 0x60, 0xf9, 0xe9, 0x9b, 0x70, 0x66, 0x22,
+ 0xf4, 0xcd, 0x16, 0x07, 0x10, 0xfe, 0xf9, 0x7c, 0x22, 0x0e, 0x10, 0x5f, 0xcd, 0x0b, 0x9a, 0xf9,
+ 0x43, 0x03, 0x16, 0x12, 0x39, 0xc2, 0x89, 0x4a, 0x35, 0xf7, 0xd5, 0x52, 0xcd, 0xc9, 0xf2, 0x1b,
+ 0xad, 0x68, 0x63, 0xde, 0x81, 0xf4, 0x57, 0xf0, 0xc9, 0xc5, 0x34, 0x1e, 0xbf, 0x98, 0xe6, 0x4f,
+ 0x72, 0x50, 0x89, 0x1e, 0x0f, 0xa2, 0x57, 0xb5, 0x99, 0xbb, 0xa0, 0xce, 0xdc, 0xa3, 0x71, 0x5d,
+ 0x30, 0x2a, 0xd3, 0xf8, 0x3e, 0x54, 0xa2, 0xc7, 0xa7, 0x51, 0x29, 0x2a, 0x7b, 0x9c, 0x17, 0x59,
+ 0x4d, 0xf4, 0xa2, 0x15, 0xc7, 0x78, 0x2c, 0xf4, 0x0d, 0x5f, 0x87, 0xde, 0x75, 0xfa, 0x7d, 0x87,
+ 0xca, 0x0b, 0xb6, 0x3c, 0xbf, 0x60, 0x8b, 0x42, 0xdf, 0x8d, 0x14, 0x1e, 0x9c, 0x2a, 0x89, 0xb6,
+ 0xa1, 0x48, 0x03, 0x32, 0xa4, 0xb2, 0xe6, 0xfc, 0x72, 0xa6, 0x77, 0x95, 0x64, 0xc8, 0x53, 0xfa,
+ 0xc8, 0x44, 0x58, 0x0b, 0xc5, 0x02, 0xc8, 0xfc, 0x37, 0x03, 0xca, 0x21, 0x0b, 0x7a, 0x45, 0x9b,
+ 0xbc, 0x5a, 0x62, 0xf2, 0x38, 0xdf, 0xff, 0xda, 0xb9, 0x33, 0xc7, 0x06, 0x2c, 0xe8, 0x6f, 0x44,
+ 0x94, 0x42, 0x92, 0x71, 0x5c, 0x21, 0x09, 0xbd, 0x02, 0x65, 0xab, 0xdf, 0xf7, 0x3e, 0xda, 0x74,
+ 0x0f, 0x64, 0xf1, 0x36, 0xba, 0x7b, 0x5e, 0x93, 0xed, 0x38, 0xe2, 0x40, 0x07, 0xb0, 0x28, 0xe4,
+ 0xe2, 0xd7, 0xbf, 0xf9, 0xcc, 0x57, 0xa0, 0x69, 0xe7, 0x58, 0x73, 0x99, 0x45, 0x5e, 0x6d, 0x1d,
+ 0x13, 0x27, 0x95, 0x34, 0x6f, 0x7f, 0xfa, 0xf9, 0xca, 0xa9, 0x9f, 0x7d, 0xbe, 0x72, 0xea, 0xb3,
+ 0xcf, 0x57, 0x4e, 0x7d, 0xe7, 0x68, 0xc5, 0xf8, 0xf4, 0x68, 0xc5, 0xf8, 0xd9, 0xd1, 0x8a, 0xf1,
+ 0xd9, 0xd1, 0x8a, 0xf1, 0x2f, 0x47, 0x2b, 0xc6, 0xef, 0xff, 0xeb, 0xca, 0xa9, 0x6f, 0x3e, 0xfb,
+ 0xd8, 0x7f, 0x49, 0xf3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0x7d, 0x3c, 0xce, 0xb6, 0x46,
+ 0x00, 0x00,
+}
+
+func (m *BinaryBuildRequestOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BinaryBuildRequestOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildRequestOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.CommitterEmail)
+ copy(dAtA[i:], m.CommitterEmail)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterEmail)))
+ i--
+ dAtA[i] = 0x42
+ i -= len(m.CommitterName)
+ copy(dAtA[i:], m.CommitterName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterName)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.AuthorEmail)
+ copy(dAtA[i:], m.AuthorEmail)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorEmail)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.AuthorName)
+ copy(dAtA[i:], m.AuthorName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorName)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Commit)
+ copy(dAtA[i:], m.Commit)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.AsFile)
+ copy(dAtA[i:], m.AsFile)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BinaryBuildSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BinaryBuildSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.AsFile)
+ copy(dAtA[i:], m.AsFile)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BitbucketWebHookCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BitbucketWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BitbucketWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Build) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Build) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Build) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildConfigList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.FailedBuildsHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedBuildsHistoryLimit))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.SuccessfulBuildsHistoryLimit != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulBuildsHistoryLimit))
+ i--
+ dAtA[i] = 0x20
+ }
+ {
+ size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.RunPolicy)
+ copy(dAtA[i:], m.RunPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RunPolicy)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Triggers) > 0 {
+ for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Triggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ImageChangeTriggers) > 0 {
+ for iNdEx := len(m.ImageChangeTriggers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ImageChangeTriggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LastVersion))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildLog) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildLog) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildLogOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildLogOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.InsecureSkipTLSVerifyBackend {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x58
+ if m.Version != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+ i--
+ dAtA[i] = 0x50
+ }
+ i--
+ if m.NoWait {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x48
+ if m.LimitBytes != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.TailLines != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+ i--
+ dAtA[i] = 0x38
+ }
+ i--
+ if m.Timestamps {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ if m.SinceTime != nil {
+ {
+ size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.SinceSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+ i--
+ dAtA[i] = 0x20
+ }
+ i--
+ if m.Previous {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ i--
+ if m.Follow {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Container)
+ copy(dAtA[i:], m.Container)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildOutput) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildOutput) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ImageLabels) > 0 {
+ for iNdEx := len(m.ImageLabels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ImageLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.PushSecret != nil {
+ {
+ size, err := m.PushSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.To != nil {
+ {
+ size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildPostCommitSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildPostCommitSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildPostCommitSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Script)
+ copy(dAtA[i:], m.Script)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Args) > 0 {
+ for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Args[iNdEx])
+ copy(dAtA[i:], m.Args[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Command) > 0 {
+ for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Command[iNdEx])
+ copy(dAtA[i:], m.Command[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SourceStrategyOptions != nil {
+ {
+ size, err := m.SourceStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.DockerStrategyOptions != nil {
+ {
+ size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.TriggeredBy) > 0 {
+ for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.LastVersion != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LastVersion))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.Binary != nil {
+ {
+ size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.From != nil {
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.TriggeredByImage != nil {
+ {
+ size, err := m.TriggeredByImage.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Revision != nil {
+ {
+ size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ConfigMaps) > 0 {
+ for iNdEx := len(m.ConfigMaps) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ConfigMaps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if len(m.Secrets) > 0 {
+ for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.SourceSecret != nil {
+ {
+ size, err := m.SourceSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ i -= len(m.ContextDir)
+ copy(dAtA[i:], m.ContextDir)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContextDir)))
+ i--
+ dAtA[i] = 0x32
+ if len(m.Images) > 0 {
+ for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Git != nil {
+ {
+ size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Dockerfile != nil {
+ i -= len(*m.Dockerfile)
+ copy(dAtA[i:], *m.Dockerfile)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Dockerfile)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Binary != nil {
+ {
+ size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.TriggeredBy) > 0 {
+ for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ }
+ i -= len(m.LogSnippet)
+ copy(dAtA[i:], m.LogSnippet)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LogSnippet)))
+ i--
+ dAtA[i] = 0x62
+ if len(m.Stages) > 0 {
+ for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Stages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ {
+ size, err := m.Output.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ if m.Config != nil {
+ {
+ size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ i -= len(m.OutputDockerImageReference)
+ copy(dAtA[i:], m.OutputDockerImageReference)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutputDockerImageReference)))
+ i--
+ dAtA[i] = 0x42
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration))
+ i--
+ dAtA[i] = 0x38
+ if m.CompletionTimestamp != nil {
+ {
+ size, err := m.CompletionTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.StartTimestamp != nil {
+ {
+ size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x1a
+ i--
+ if m.Cancelled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Phase)
+ copy(dAtA[i:], m.Phase)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildStatusOutput) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildStatusOutput) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatusOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.To != nil {
+ {
+ size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildStatusOutputTo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildStatusOutputTo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatusOutputTo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.ImageDigest)
+ copy(dAtA[i:], m.ImageDigest)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageDigest)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.JenkinsPipelineStrategy != nil {
+ {
+ size, err := m.JenkinsPipelineStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.CustomStrategy != nil {
+ {
+ size, err := m.CustomStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.SourceStrategy != nil {
+ {
+ size, err := m.SourceStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.DockerStrategy != nil {
+ {
+ size, err := m.DockerStrategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildTriggerCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildTriggerCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildTriggerCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.BitbucketWebHook != nil {
+ {
+ size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.GitLabWebHook != nil {
+ {
+ size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.ImageChangeBuild != nil {
+ {
+ size, err := m.ImageChangeBuild.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.GitHubWebHook != nil {
+ {
+ size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.GenericWebHook != nil {
+ {
+ size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildTriggerPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildTriggerPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.BitbucketWebHook != nil {
+ {
+ size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.GitLabWebHook != nil {
+ {
+ size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.ImageChange != nil {
+ {
+ size, err := m.ImageChange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.GenericWebHook != nil {
+ {
+ size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.GitHubWebHook != nil {
+ {
+ size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildVolume) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildVolume) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Mounts) > 0 {
+ for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.Source.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildVolumeMount) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildVolumeMount) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildVolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.DestinationPath)
+ copy(dAtA[i:], m.DestinationPath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationPath)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildVolumeSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.CSI != nil {
+ {
+ size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ConfigMap != nil {
+ {
+ size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Secret != nil {
+ {
+ size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CommonSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CommonSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CommonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MountTrustedCA != nil {
+ i--
+ if *m.MountTrustedCA {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ }
+ if m.NodeSelector != nil {
+ {
+ size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.CompletionDeadlineSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.CompletionDeadlineSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
+ {
+ size, err := m.PostCommit.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ {
+ size, err := m.Output.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.Revision != nil {
+ {
+ size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Source.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.ServiceAccount)
+ copy(dAtA[i:], m.ServiceAccount)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CommonWebHookCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CommonWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CommonWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Secret)
+ copy(dAtA[i:], m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ if m.Revision != nil {
+ {
+ size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapBuildSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigMapBuildSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.DestinationDir)
+ copy(dAtA[i:], m.DestinationDir)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CustomBuildStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CustomBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.BuildAPIVersion)
+ copy(dAtA[i:], m.BuildAPIVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildAPIVersion)))
+ i--
+ dAtA[i] = 0x3a
+ if len(m.Secrets) > 0 {
+ for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i--
+ if m.ForcePull {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ i--
+ if m.ExposeDockerSocket {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.PullSecret != nil {
+ {
+ size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *DockerBuildStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DockerBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Volumes) > 0 {
+ for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if m.ImageOptimizationPolicy != nil {
+ i -= len(*m.ImageOptimizationPolicy)
+ copy(dAtA[i:], *m.ImageOptimizationPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageOptimizationPolicy)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.BuildArgs) > 0 {
+ for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i -= len(m.DockerfilePath)
+ copy(dAtA[i:], m.DockerfilePath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerfilePath)))
+ i--
+ dAtA[i] = 0x32
+ i--
+ if m.ForcePull {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i--
+ if m.NoCache {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ if m.PullSecret != nil {
+ {
+ size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.From != nil {
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DockerStrategyOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DockerStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NoCache != nil {
+ i--
+ if *m.NoCache {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.BuildArgs) > 0 {
+ for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GenericWebHookCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenericWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenericWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Secret)
+ copy(dAtA[i:], m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ if m.Revision != nil {
+ {
+ size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GenericWebHookEvent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenericWebHookEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenericWebHookEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DockerStrategyOptions != nil {
+ {
+ size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Git != nil {
+ {
+ size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GitBuildSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitBuildSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProxyConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.URI)
+ copy(dAtA[i:], m.URI)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.URI)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GitHubWebHookCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitHubWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitHubWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Secret)
+ copy(dAtA[i:], m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ if m.Revision != nil {
+ {
+ size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GitInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Refs) > 0 {
+ for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GitLabWebHookCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitLabWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitLabWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GitRefInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitRefInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitRefInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GitSourceRevision) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GitSourceRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitSourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Committer.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Author.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Commit)
+ copy(dAtA[i:], m.Commit)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeCause) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageChangeCause) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.FromRef != nil {
+ {
+ size, err := m.FromRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.ImageID)
+ copy(dAtA[i:], m.ImageID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeTrigger) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageChangeTrigger) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Paused {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ if m.From != nil {
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.LastTriggeredImageID)
+ copy(dAtA[i:], m.LastTriggeredImageID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageChangeTriggerStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageChangeTriggerStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageChangeTriggerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LastTriggerTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.LastTriggeredImageID)
+ copy(dAtA[i:], m.LastTriggeredImageID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageLabel) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageLabel) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.As) > 0 {
+ for iNdEx := len(m.As) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.As[iNdEx])
+ copy(dAtA[i:], m.As[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.As[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.PullSecret != nil {
+ {
+ size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Paths) > 0 {
+ for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageSourcePath) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageSourcePath) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSourcePath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.DestinationDir)
+ copy(dAtA[i:], m.DestinationDir)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.SourcePath)
+ copy(dAtA[i:], m.SourcePath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourcePath)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTagReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamTagReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *JenkinsPipelineBuildStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *JenkinsPipelineBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JenkinsPipelineBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Jenkinsfile)
+ copy(dAtA[i:], m.Jenkinsfile)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Jenkinsfile)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.JenkinsfilePath)
+ copy(dAtA[i:], m.JenkinsfilePath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.JenkinsfilePath)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m OptionalNodeSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m OptionalNodeSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ keysForItems := make([]string, 0, len(m))
+ for k := range m {
+ keysForItems = append(keysForItems, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForItems)
+ for iNdEx := len(keysForItems) - 1; iNdEx >= 0; iNdEx-- {
+ v := m[string(keysForItems[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForItems[iNdEx])
+ copy(dAtA[i:], keysForItems[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForItems[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProxyConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProxyConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NoProxy != nil {
+ i -= len(*m.NoProxy)
+ copy(dAtA[i:], *m.NoProxy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NoProxy)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.HTTPSProxy != nil {
+ i -= len(*m.HTTPSProxy)
+ copy(dAtA[i:], *m.HTTPSProxy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPSProxy)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.HTTPProxy != nil {
+ i -= len(*m.HTTPProxy)
+ copy(dAtA[i:], *m.HTTPProxy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPProxy)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretBuildSource) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretBuildSource) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.DestinationDir)
+ copy(dAtA[i:], m.DestinationDir)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretLocalReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretLocalReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretLocalReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.MountPath)
+ copy(dAtA[i:], m.MountPath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.SecretSource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SourceBuildStrategy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SourceBuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Volumes) > 0 {
+ for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ i--
+ if m.ForcePull {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ if m.Incremental != nil {
+ i--
+ if *m.Incremental {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ i -= len(m.Scripts)
+ copy(dAtA[i:], m.Scripts)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scripts)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.Env) > 0 {
+ for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.PullSecret != nil {
+ {
+ size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SourceControlUser) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SourceControlUser) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceControlUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Email)
+ copy(dAtA[i:], m.Email)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Email)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SourceRevision) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SourceRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Git != nil {
+ {
+ size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SourceStrategyOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SourceStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SourceStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Incremental != nil {
+ i--
+ if *m.Incremental {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StageInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StageInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Steps) > 0 {
+ for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StepInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StepInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StepInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WebHookTrigger) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WebHookTrigger) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebHookTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SecretReference != nil {
+ {
+ size, err := m.SecretReference.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i--
+ if m.AllowEnv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Secret)
+ copy(dAtA[i:], m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *BinaryBuildRequestOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.AsFile)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Commit)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.AuthorName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.AuthorEmail)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CommitterName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CommitterEmail)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BinaryBuildSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.AsFile)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BitbucketWebHookCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CommonWebHookCause.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Build) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildConfigList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildConfigSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Triggers) > 0 {
+ for _, e := range m.Triggers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RunPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.CommonSpec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SuccessfulBuildsHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.SuccessfulBuildsHistoryLimit))
+ }
+ if m.FailedBuildsHistoryLimit != nil {
+ n += 1 + sovGenerated(uint64(*m.FailedBuildsHistoryLimit))
+ }
+ return n
+}
+
+func (m *BuildConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.LastVersion))
+ if len(m.ImageChangeTriggers) > 0 {
+ for _, e := range m.ImageChangeTriggers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildLog) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+// Size returns the serialized protobuf size of m in bytes.
+// Each bare "n += 2" below accounts for a scalar bool field: one key byte
+// plus one value byte. Matching the field order rendered by
+// BuildLogOptions.String, these correspond (in order) to Follow, Previous,
+// Timestamps, NoWait, and InsecureSkipTLSVerifyBackend.
+func (m *BuildLogOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Container)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ n += 2
+ if m.SinceSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+ }
+ if m.SinceTime != nil {
+ l = m.SinceTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if m.TailLines != nil {
+ n += 1 + sovGenerated(uint64(*m.TailLines))
+ }
+ if m.LimitBytes != nil {
+ n += 1 + sovGenerated(uint64(*m.LimitBytes))
+ }
+ n += 2
+ if m.Version != nil {
+ n += 1 + sovGenerated(uint64(*m.Version))
+ }
+ n += 2
+ return n
+}
+
+func (m *BuildOutput) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.To != nil {
+ l = m.To.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PushSecret != nil {
+ l = m.PushSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ImageLabels) > 0 {
+ for _, e := range m.ImageLabels {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildPostCommitSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Command) > 0 {
+ for _, s := range m.Command {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Args) > 0 {
+ for _, s := range m.Args {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Script)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Revision != nil {
+ l = m.Revision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TriggeredByImage != nil {
+ l = m.TriggeredByImage.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.From != nil {
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Binary != nil {
+ l = m.Binary.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.LastVersion != nil {
+ n += 1 + sovGenerated(uint64(*m.LastVersion))
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.TriggeredBy) > 0 {
+ for _, e := range m.TriggeredBy {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.DockerStrategyOptions != nil {
+ l = m.DockerStrategyOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SourceStrategyOptions != nil {
+ l = m.SourceStrategyOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BuildSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Binary != nil {
+ l = m.Binary.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Dockerfile != nil {
+ l = len(*m.Dockerfile)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Git != nil {
+ l = m.Git.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Images) > 0 {
+ for _, e := range m.Images {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ContextDir)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.SourceSecret != nil {
+ l = m.SourceSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Secrets) > 0 {
+ for _, e := range m.Secrets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ConfigMaps) > 0 {
+ for _, e := range m.ConfigMaps {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CommonSpec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.TriggeredBy) > 0 {
+ for _, e := range m.TriggeredBy {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.StartTimestamp != nil {
+ l = m.StartTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CompletionTimestamp != nil {
+ l = m.CompletionTimestamp.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Duration))
+ l = len(m.OutputDockerImageReference)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Config != nil {
+ l = m.Config.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Output.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Stages) > 0 {
+ for _, e := range m.Stages {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.LogSnippet)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildStatusOutput) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.To != nil {
+ l = m.To.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BuildStatusOutputTo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ImageDigest)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DockerStrategy != nil {
+ l = m.DockerStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SourceStrategy != nil {
+ l = m.SourceStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CustomStrategy != nil {
+ l = m.CustomStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.JenkinsPipelineStrategy != nil {
+ l = m.JenkinsPipelineStrategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BuildTriggerCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.GenericWebHook != nil {
+ l = m.GenericWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GitHubWebHook != nil {
+ l = m.GitHubWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageChangeBuild != nil {
+ l = m.ImageChangeBuild.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GitLabWebHook != nil {
+ l = m.GitLabWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.BitbucketWebHook != nil {
+ l = m.BitbucketWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BuildTriggerPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.GitHubWebHook != nil {
+ l = m.GitHubWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GenericWebHook != nil {
+ l = m.GenericWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ImageChange != nil {
+ l = m.ImageChange.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.GitLabWebHook != nil {
+ l = m.GitLabWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.BitbucketWebHook != nil {
+ l = m.BitbucketWebHook.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *BuildVolume) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Source.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Mounts) > 0 {
+ for _, e := range m.Mounts {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BuildVolumeMount) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.DestinationPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BuildVolumeSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Secret != nil {
+ l = m.Secret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ConfigMap != nil {
+ l = m.ConfigMap.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CSI != nil {
+ l = m.CSI.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *CommonSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ServiceAccount)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Source.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Revision != nil {
+ l = m.Revision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.Strategy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Output.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Resources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.PostCommit.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.CompletionDeadlineSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds))
+ }
+ if m.NodeSelector != nil {
+ l = m.NodeSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MountTrustedCA != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *CommonWebHookCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Revision != nil {
+ l = m.Revision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ConfigMapBuildSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ConfigMap.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DestinationDir)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CustomBuildStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PullSecret != nil {
+ l = m.PullSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ n += 2
+ if len(m.Secrets) > 0 {
+ for _, e := range m.Secrets {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.BuildAPIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *DockerBuildStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.From != nil {
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PullSecret != nil {
+ l = m.PullSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ l = len(m.DockerfilePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.BuildArgs) > 0 {
+ for _, e := range m.BuildArgs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ImageOptimizationPolicy != nil {
+ l = len(*m.ImageOptimizationPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Volumes) > 0 {
+ for _, e := range m.Volumes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+// Size returns the serialized protobuf size of m in bytes. Repeated
+// message fields contribute one (key + length-prefix + payload) unit per
+// element; the optional *bool NoCache costs a fixed two bytes (key byte
+// plus value byte) only when set.
+func (m *DockerStrategyOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.BuildArgs) > 0 {
+ for _, e := range m.BuildArgs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.NoCache != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *GenericWebHookCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Revision != nil {
+ l = m.Revision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GenericWebHookEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Git != nil {
+ l = m.Git.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.DockerStrategyOptions != nil {
+ l = m.DockerStrategyOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *GitBuildSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.URI)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Ref)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ProxyConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GitHubWebHookCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Revision != nil {
+ l = m.Revision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GitInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.GitBuildSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.GitSourceRevision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Refs) > 0 {
+ for _, e := range m.Refs {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *GitLabWebHookCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.CommonWebHookCause.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GitRefInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.GitBuildSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.GitSourceRevision.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *GitSourceRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Commit)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Author.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Committer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageChangeCause) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ImageID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.FromRef != nil {
+ l = m.FromRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ImageChangeTrigger) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.LastTriggeredImageID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.From != nil {
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ return n
+}
+
+func (m *ImageChangeTriggerStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.LastTriggeredImageID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTriggerTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageLabel) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Paths) > 0 {
+ for _, e := range m.Paths {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.PullSecret != nil {
+ l = m.PullSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.As) > 0 {
+ for _, s := range m.As {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageSourcePath) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SourcePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DestinationDir)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamTagReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *JenkinsPipelineBuildStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.JenkinsfilePath)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Jenkinsfile)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+// Size returns the serialized protobuf size of the map m in bytes.
+// Value receiver (not pointer) because OptionalNodeSelector is itself a
+// map type, so it is directly nil-comparable. Each map entry is encoded
+// as a nested message — key and value each cost a key byte, a varint
+// length prefix, and the string bytes — and the entry as a whole then
+// costs its own key byte and length prefix at the outer level.
+func (m OptionalNodeSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for k, v := range m {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ProxyConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.HTTPProxy != nil {
+ l = len(*m.HTTPProxy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.HTTPSProxy != nil {
+ l = len(*m.HTTPSProxy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NoProxy != nil {
+ l = len(*m.NoProxy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SecretBuildSource) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Secret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DestinationDir)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SecretLocalReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SecretSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.SecretSource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.MountPath)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SourceBuildStrategy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PullSecret != nil {
+ l = m.PullSecret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Env) > 0 {
+ for _, e := range m.Env {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Scripts)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Incremental != nil {
+ n += 2
+ }
+ n += 2
+ if len(m.Volumes) > 0 {
+ for _, e := range m.Volumes {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SourceControlUser) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Email)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SourceRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Git != nil {
+ l = m.Git.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *SourceStrategyOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Incremental != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *StageInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.DurationMilliseconds))
+ if len(m.Steps) > 0 {
+ for _, e := range m.Steps {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StepInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.StartTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.DurationMilliseconds))
+ return n
+}
+
+func (m *WebHookTrigger) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.SecretReference != nil {
+ l = m.SecretReference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+// sovGenerated returns the number of bytes needed to encode x as a
+// protobuf unsigned varint (7 payload bits per byte): ceil(bitlen/7).
+// The x|1 forces Len64 to return at least 1 so that zero still counts
+// as one byte.
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+// sozGenerated returns the varint-encoded size of x after zigzag
+// encoding ((x<<1) XOR arithmetic-shift sign-extension), which maps
+// signed values of small magnitude to small unsigned values
+// (0, -1, 1, -2 -> 0, 1, 2, 3) so they encode compactly.
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+// String renders this as a one-line, human-readable struct dump for
+// debugging/logging. The nested strings.Replace calls rewrite the
+// embedded type name to its package-qualified form ("v1.ObjectMeta")
+// and strip the leading "&" from the embedded value's rendering.
+// Generated-code pattern; receiver name "this" comes from the generator.
+func (this *BinaryBuildRequestOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BinaryBuildRequestOptions{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`,
+ `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`,
+ `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`,
+ `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`,
+ `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BinaryBuildSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BinaryBuildSource{`,
+ `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BitbucketWebHookCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BitbucketWebHookCause{`,
+ `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Build) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Build{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildConfig{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+// String renders this as a one-line debug representation. The Items
+// slice is pre-rendered into a "[]BuildConfig{...}" fragment by
+// concatenating each element's String() with its "&" prefix stripped,
+// then spliced into the outer struct dump alongside the
+// package-qualified ListMeta. Generated-code pattern.
+func (this *BuildConfigList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BuildConfig{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BuildConfigList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildConfigSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTriggers := "[]BuildTriggerPolicy{"
+ for _, f := range this.Triggers {
+ repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTriggers += "}"
+ s := strings.Join([]string{`&BuildConfigSpec{`,
+ `Triggers:` + repeatedStringForTriggers + `,`,
+ `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`,
+ `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`,
+ `SuccessfulBuildsHistoryLimit:` + valueToStringGenerated(this.SuccessfulBuildsHistoryLimit) + `,`,
+ `FailedBuildsHistoryLimit:` + valueToStringGenerated(this.FailedBuildsHistoryLimit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildConfigStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImageChangeTriggers := "[]ImageChangeTriggerStatus{"
+ for _, f := range this.ImageChangeTriggers {
+ repeatedStringForImageChangeTriggers += strings.Replace(strings.Replace(f.String(), "ImageChangeTriggerStatus", "ImageChangeTriggerStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImageChangeTriggers += "}"
+ s := strings.Join([]string{`&BuildConfigStatus{`,
+ `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`,
+ `ImageChangeTriggers:` + repeatedStringForImageChangeTriggers + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Build{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Build", "Build", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BuildList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildLog) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildLog{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildLogOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildLogOptions{`,
+ `Container:` + fmt.Sprintf("%v", this.Container) + `,`,
+ `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`,
+ `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`,
+ `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`,
+ `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`,
+ `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`,
+ `TailLines:` + valueToStringGenerated(this.TailLines) + `,`,
+ `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`,
+ `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`,
+ `Version:` + valueToStringGenerated(this.Version) + `,`,
+ `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildOutput) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImageLabels := "[]ImageLabel{"
+ for _, f := range this.ImageLabels {
+ repeatedStringForImageLabels += strings.Replace(strings.Replace(f.String(), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImageLabels += "}"
+ s := strings.Join([]string{`&BuildOutput{`,
+ `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `ImageLabels:` + repeatedStringForImageLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildPostCommitSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildPostCommitSpec{`,
+ `Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+ `Args:` + fmt.Sprintf("%v", this.Args) + `,`,
+ `Script:` + fmt.Sprintf("%v", this.Script) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ repeatedStringForTriggeredBy := "[]BuildTriggerCause{"
+ for _, f := range this.TriggeredBy {
+ repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTriggeredBy += "}"
+ s := strings.Join([]string{`&BuildRequest{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`,
+ `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`,
+ `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `TriggeredBy:` + repeatedStringForTriggeredBy + `,`,
+ `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`,
+ `SourceStrategyOptions:` + strings.Replace(this.SourceStrategyOptions.String(), "SourceStrategyOptions", "SourceStrategyOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImages := "[]ImageSource{"
+ for _, f := range this.Images {
+ repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageSource", "ImageSource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImages += "}"
+ repeatedStringForSecrets := "[]SecretBuildSource{"
+ for _, f := range this.Secrets {
+ repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSecrets += "}"
+ repeatedStringForConfigMaps := "[]ConfigMapBuildSource{"
+ for _, f := range this.ConfigMaps {
+ repeatedStringForConfigMaps += strings.Replace(strings.Replace(f.String(), "ConfigMapBuildSource", "ConfigMapBuildSource", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConfigMaps += "}"
+ s := strings.Join([]string{`&BuildSource{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`,
+ `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`,
+ `Git:` + strings.Replace(this.Git.String(), "GitBuildSource", "GitBuildSource", 1) + `,`,
+ `Images:` + repeatedStringForImages + `,`,
+ `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`,
+ `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `Secrets:` + repeatedStringForSecrets + `,`,
+ `ConfigMaps:` + repeatedStringForConfigMaps + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTriggeredBy := "[]BuildTriggerCause{"
+ for _, f := range this.TriggeredBy {
+ repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTriggeredBy += "}"
+ s := strings.Join([]string{`&BuildSpec{`,
+ `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`,
+ `TriggeredBy:` + repeatedStringForTriggeredBy + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForStages := "[]StageInfo{"
+ for _, f := range this.Stages {
+ repeatedStringForStages += strings.Replace(strings.Replace(f.String(), "StageInfo", "StageInfo", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForStages += "}"
+ repeatedStringForConditions := "[]BuildCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "BuildCondition", "BuildCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&BuildStatus{`,
+ `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+ `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "v1.Time", 1) + `,`,
+ `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "v1.Time", 1) + `,`,
+ `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`,
+ `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`,
+ `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildStatusOutput", "BuildStatusOutput", 1), `&`, ``, 1) + `,`,
+ `Stages:` + repeatedStringForStages + `,`,
+ `LogSnippet:` + fmt.Sprintf("%v", this.LogSnippet) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildStatusOutput) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildStatusOutput{`,
+ `To:` + strings.Replace(this.To.String(), "BuildStatusOutputTo", "BuildStatusOutputTo", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildStatusOutputTo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildStatusOutputTo{`,
+ `ImageDigest:` + fmt.Sprintf("%v", this.ImageDigest) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildStrategy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `DockerStrategy:` + strings.Replace(this.DockerStrategy.String(), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`,
+ `SourceStrategy:` + strings.Replace(this.SourceStrategy.String(), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`,
+ `CustomStrategy:` + strings.Replace(this.CustomStrategy.String(), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`,
+ `JenkinsPipelineStrategy:` + strings.Replace(this.JenkinsPipelineStrategy.String(), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildTriggerCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildTriggerCause{`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`,
+ `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`,
+ `ImageChangeBuild:` + strings.Replace(this.ImageChangeBuild.String(), "ImageChangeCause", "ImageChangeCause", 1) + `,`,
+ `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "GitLabWebHookCause", "GitLabWebHookCause", 1) + `,`,
+ `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "BitbucketWebHookCause", "BitbucketWebHookCause", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildTriggerPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildTriggerPolicy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`,
+ `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`,
+ `ImageChange:` + strings.Replace(this.ImageChange.String(), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`,
+ `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`,
+ `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildVolume) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMounts := "[]BuildVolumeMount{"
+ for _, f := range this.Mounts {
+ repeatedStringForMounts += strings.Replace(strings.Replace(f.String(), "BuildVolumeMount", "BuildVolumeMount", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMounts += "}"
+ s := strings.Join([]string{`&BuildVolume{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildVolumeSource", "BuildVolumeSource", 1), `&`, ``, 1) + `,`,
+ `Mounts:` + repeatedStringForMounts + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildVolumeMount) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildVolumeMount{`,
+ `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BuildVolumeSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BuildVolumeSource{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "v11.SecretVolumeSource", 1) + `,`,
+ `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "v11.ConfigMapVolumeSource", 1) + `,`,
+ `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "v11.CSIVolumeSource", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CommonSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CommonSpec{`,
+ `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`,
+ `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`,
+ `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`,
+ `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`,
+ `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`,
+ `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v11.ResourceRequirements", 1), `&`, ``, 1) + `,`,
+ `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`,
+ `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`,
+ `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", "OptionalNodeSelector", 1) + `,`,
+ `MountTrustedCA:` + valueToStringGenerated(this.MountTrustedCA) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CommonWebHookCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CommonWebHookCause{`,
+ `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ConfigMapBuildSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ConfigMapBuildSource{`,
+ `ConfigMap:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`,
+ `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CustomBuildStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ repeatedStringForSecrets := "[]SecretSpec{"
+ for _, f := range this.Secrets {
+ repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSecrets += "}"
+ s := strings.Join([]string{`&CustomBuildStrategy{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`,
+ `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`,
+ `Secrets:` + repeatedStringForSecrets + `,`,
+ `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DockerBuildStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ repeatedStringForBuildArgs := "[]EnvVar{"
+ for _, f := range this.BuildArgs {
+ repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForBuildArgs += "}"
+ repeatedStringForVolumes := "[]BuildVolume{"
+ for _, f := range this.Volumes {
+ repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVolumes += "}"
+ s := strings.Join([]string{`&DockerBuildStrategy{`,
+ `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`,
+ `DockerfilePath:` + fmt.Sprintf("%v", this.DockerfilePath) + `,`,
+ `BuildArgs:` + repeatedStringForBuildArgs + `,`,
+ `ImageOptimizationPolicy:` + valueToStringGenerated(this.ImageOptimizationPolicy) + `,`,
+ `Volumes:` + repeatedStringForVolumes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DockerStrategyOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForBuildArgs := "[]EnvVar{"
+ for _, f := range this.BuildArgs {
+ repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForBuildArgs += "}"
+ s := strings.Join([]string{`&DockerStrategyOptions{`,
+ `BuildArgs:` + repeatedStringForBuildArgs + `,`,
+ `NoCache:` + valueToStringGenerated(this.NoCache) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GenericWebHookCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GenericWebHookCause{`,
+ `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GenericWebHookEvent) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ s := strings.Join([]string{`&GenericWebHookEvent{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Git:` + strings.Replace(this.Git.String(), "GitInfo", "GitInfo", 1) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitBuildSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GitBuildSource{`,
+ `URI:` + fmt.Sprintf("%v", this.URI) + `,`,
+ `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+ `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitHubWebHookCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GitHubWebHookCause{`,
+ `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRefs := "[]GitRefInfo{"
+ for _, f := range this.Refs {
+ repeatedStringForRefs += strings.Replace(strings.Replace(f.String(), "GitRefInfo", "GitRefInfo", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRefs += "}"
+ s := strings.Join([]string{`&GitInfo{`,
+ `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`,
+ `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`,
+ `Refs:` + repeatedStringForRefs + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitLabWebHookCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GitLabWebHookCause{`,
+ `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitRefInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GitRefInfo{`,
+ `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`,
+ `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GitSourceRevision) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GitSourceRevision{`,
+ `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`,
+ `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`,
+ `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageChangeCause) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageChangeCause{`,
+ `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`,
+ `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageChangeTrigger) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageChangeTrigger{`,
+ `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`,
+ `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageChangeTriggerStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageChangeTriggerStatus{`,
+ `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`,
+ `From:` + strings.Replace(strings.Replace(this.From.String(), "ImageStreamTagReference", "ImageStreamTagReference", 1), `&`, ``, 1) + `,`,
+ `LastTriggerTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTriggerTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageLabel) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageLabel{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPaths := "[]ImageSourcePath{"
+ for _, f := range this.Paths {
+ repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPaths += "}"
+ s := strings.Join([]string{`&ImageSource{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `Paths:` + repeatedStringForPaths + `,`,
+ `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `As:` + fmt.Sprintf("%v", this.As) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageSourcePath) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageSourcePath{`,
+ `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`,
+ `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamTagReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageStreamTagReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *JenkinsPipelineBuildStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`,
+ `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`,
+ `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProxyConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProxyConfig{`,
+ `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`,
+ `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`,
+ `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretBuildSource) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SecretBuildSource{`,
+ `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`,
+ `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretLocalReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SecretLocalReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SecretSpec{`,
+ `SecretSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretSource), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`,
+ `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SourceBuildStrategy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEnv := "[]EnvVar{"
+ for _, f := range this.Env {
+ repeatedStringForEnv += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForEnv += "}"
+ repeatedStringForVolumes := "[]BuildVolume{"
+ for _, f := range this.Volumes {
+ repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVolumes += "}"
+ s := strings.Join([]string{`&SourceBuildStrategy{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `Env:` + repeatedStringForEnv + `,`,
+ `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`,
+ `Incremental:` + valueToStringGenerated(this.Incremental) + `,`,
+ `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`,
+ `Volumes:` + repeatedStringForVolumes + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SourceControlUser) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SourceControlUser{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Email:` + fmt.Sprintf("%v", this.Email) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SourceRevision) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SourceRevision{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Git:` + strings.Replace(this.Git.String(), "GitSourceRevision", "GitSourceRevision", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SourceStrategyOptions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SourceStrategyOptions{`,
+ `Incremental:` + valueToStringGenerated(this.Incremental) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StageInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSteps := "[]StepInfo{"
+ for _, f := range this.Steps {
+ repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "StepInfo", "StepInfo", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSteps += "}"
+ s := strings.Join([]string{`&StageInfo{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`,
+ `Steps:` + repeatedStringForSteps + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StepInfo) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StepInfo{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WebHookTrigger) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WebHookTrigger{`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`,
+ `SecretReference:` + strings.Replace(this.SecretReference.String(), "SecretLocalReference", "SecretLocalReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *BinaryBuildRequestOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AsFile = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commit = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AuthorName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AuthorEmail = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CommitterName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CommitterEmail = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BinaryBuildSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BinaryBuildSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AsFile = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BitbucketWebHookCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BitbucketWebHookCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BitbucketWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Build) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Build: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = BuildConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildConfigList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BuildConfig{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildConfigSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Triggers = append(m.Triggers, BuildTriggerPolicy{})
+ if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RunPolicy = BuildRunPolicy(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulBuildsHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SuccessfulBuildsHistoryLimit = &v
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailedBuildsHistoryLimit", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.FailedBuildsHistoryLimit = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildConfigStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+ }
+ m.LastVersion = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LastVersion |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeTriggers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImageChangeTriggers = append(m.ImageChangeTriggers, ImageChangeTriggerStatus{})
+ if err := m.ImageChangeTriggers[len(m.ImageChangeTriggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Build{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildLog) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildLog: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildLogOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildLogOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Container = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Follow = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Previous = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SinceSeconds = &v
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SinceTime == nil {
+ m.SinceTime = &v1.Time{}
+ }
+ if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Timestamps = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TailLines = &v
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LimitBytes = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NoWait = bool(v != 0)
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Version = &v
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.InsecureSkipTLSVerifyBackend = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildOutput) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.To == nil {
+ m.To = &v11.ObjectReference{}
+ }
+ if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PushSecret == nil {
+ m.PushSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.PushSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImageLabels = append(m.ImageLabels, ImageLabel{})
+ if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildPostCommitSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Script = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Revision == nil {
+ m.Revision = &SourceRevision{}
+ }
+ if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TriggeredByImage == nil {
+ m.TriggeredByImage = &v11.ObjectReference{}
+ }
+ if err := m.TriggeredByImage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.From == nil {
+ m.From = &v11.ObjectReference{}
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Binary == nil {
+ m.Binary = &BinaryBuildSource{}
+ }
+ if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LastVersion = &v
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+ if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DockerStrategyOptions == nil {
+ m.DockerStrategyOptions = &DockerStrategyOptions{}
+ }
+ if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategyOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SourceStrategyOptions == nil {
+ m.SourceStrategyOptions = &SourceStrategyOptions{}
+ }
+ if err := m.SourceStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Binary == nil {
+ m.Binary = &BinaryBuildSource{}
+ }
+ if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Dockerfile = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Git == nil {
+ m.Git = &GitBuildSource{}
+ }
+ if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ImageSource{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ContextDir = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SourceSecret == nil {
+ m.SourceSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.SourceSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secrets = append(m.Secrets, SecretBuildSource{})
+ if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMaps", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConfigMaps = append(m.ConfigMaps, ConfigMapBuildSource{})
+ if err := m.ConfigMaps[len(m.ConfigMaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{})
+ if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BuildStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = BuildPhase(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Cancelled = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = StatusReason(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.StartTimestamp == nil {
+ m.StartTimestamp = &v1.Time{}
+ }
+ if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.CompletionTimestamp == nil {
+ m.CompletionTimestamp = &v1.Time{}
+ }
+ if err := m.CompletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+ }
+ m.Duration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Duration |= time.Duration(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OutputDockerImageReference = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Config == nil {
+ m.Config = &v11.ObjectReference{}
+ }
+ if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Stages = append(m.Stages, StageInfo{})
+ if err := m.Stages[len(m.Stages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogSnippet", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LogSnippet = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, BuildCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): protoc-gen-gogo generated decoder — regenerate from the .proto
+// instead of hand-editing the decoding logic.
+func (m *BuildStatusOutput) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatusOutput: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatusOutput: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: To — nested message, decoded recursively.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.To == nil {
+				m.To = &BuildStatusOutputTo{}
+			}
+			if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildStatusOutputTo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatusOutputTo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatusOutputTo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: ImageDigest — length-delimited string.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageDigest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageDigest = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Fields: 1=Type (string), 2=DockerStrategy, 3=SourceStrategy, 4=CustomStrategy,
+// 5=JenkinsPipelineStrategy (all optional nested messages).
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildStrategyType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategy == nil {
+				m.DockerStrategy = &DockerBuildStrategy{}
+			}
+			if err := m.DockerStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceStrategy == nil {
+				m.SourceStrategy = &SourceBuildStrategy{}
+			}
+			if err := m.SourceStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CustomStrategy == nil {
+				m.CustomStrategy = &CustomBuildStrategy{}
+			}
+			if err := m.CustomStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.JenkinsPipelineStrategy == nil {
+				m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{}
+			}
+			if err := m.JenkinsPipelineStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Fields: 1=Message (string), 2=GenericWebHook, 3=GitHubWebHook,
+// 4=ImageChangeBuild, 5=GitLabWebHook, 6=BitbucketWebHook (nested messages).
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildTriggerCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GenericWebHook == nil {
+				m.GenericWebHook = &GenericWebHookCause{}
+			}
+			if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitHubWebHook == nil {
+				m.GitHubWebHook = &GitHubWebHookCause{}
+			}
+			if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImageChangeBuild == nil {
+				m.ImageChangeBuild = &ImageChangeCause{}
+			}
+			if err := m.ImageChangeBuild.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitLabWebHook == nil {
+				m.GitLabWebHook = &GitLabWebHookCause{}
+			}
+			if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &BitbucketWebHookCause{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Fields: 1=Type (string), 2=GitHubWebHook, 3=GenericWebHook, 4=ImageChange,
+// 5=GitLabWebHook, 6=BitbucketWebHook (nested messages).
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildTriggerPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildTriggerType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitHubWebHook == nil {
+				m.GitHubWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GenericWebHook == nil {
+				m.GenericWebHook = &WebHookTrigger{}
+			}
+			if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImageChange == nil {
+				m.ImageChange = &ImageChangeTrigger{}
+			}
+			if err := m.ImageChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitLabWebHook == nil {
+				m.GitLabWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &WebHookTrigger{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Fields: 1=Name (string), 2=Source (embedded message), 3=Mounts (repeated).
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildVolume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			// Source is a non-pointer field, so it is decoded in place.
+			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			// Repeated field: append a zero element, then decode into it.
+			m.Mounts = append(m.Mounts, BuildVolumeMount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildVolumeMount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolumeMount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolumeMount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Field 1: DestinationPath — length-delimited string.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// Fields: 1=Type (string), 2=Secret, 3=ConfigMap, 4=CSI (optional nested
+// core/v1 volume-source messages).
+// NOTE(review): protoc-gen-gogo generated decoder — do not hand-edit.
+func (m *BuildVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Read the varint tag: field number in the high bits, wire type in the low 3.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildVolumeSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Secret == nil {
+				m.Secret = &v11.SecretVolumeSource{}
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMap == nil {
+				m.ConfigMap = &v11.ConfigMapVolumeSource{}
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CSI == nil {
+				m.CSI = &v11.CSIVolumeSource{}
+			}
+			if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it to stay forward-compatible with newer schemas.
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommonSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccount = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Revision == nil {
+ m.Revision = &SourceRevision{}
+ }
+ if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PostCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CompletionDeadlineSeconds = &v
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NodeSelector == nil {
+ m.NodeSelector = OptionalNodeSelector{}
+ }
+ if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MountTrustedCA", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.MountTrustedCA = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CommonWebHookCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CommonWebHookCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CommonWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Revision == nil {
+ m.Revision = &SourceRevision{}
+ }
+ if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigMapBuildSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigMapBuildSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigMapBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationDir = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomBuildStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PullSecret == nil {
+ m.PullSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ExposeDockerSocket = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ForcePull = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secrets = append(m.Secrets, SecretSpec{})
+ if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuildAPIVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DockerBuildStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.From == nil {
+ m.From = &v11.ObjectReference{}
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PullSecret == nil {
+ m.PullSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NoCache = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ForcePull = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerfilePath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuildArgs = append(m.BuildArgs, v11.EnvVar{})
+ if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageOptimizationPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ImageOptimizationPolicy(dAtA[iNdEx:postIndex])
+ m.ImageOptimizationPolicy = &s
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Volumes = append(m.Volumes, BuildVolume{})
+ if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DockerStrategyOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DockerStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DockerStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuildArgs = append(m.BuildArgs, v11.EnvVar{})
+ if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.NoCache = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GenericWebHookCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Revision == nil {
+ m.Revision = &SourceRevision{}
+ }
+ if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GenericWebHookEvent) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Git == nil {
+ m.Git = &GitInfo{}
+ }
+ if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DockerStrategyOptions == nil {
+ m.DockerStrategyOptions = &DockerStrategyOptions{}
+ }
+ if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitBuildSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.URI = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ref = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProxyConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitHubWebHookCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Revision == nil {
+ m.Revision = &SourceRevision{}
+ }
+ if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Refs = append(m.Refs, GitRefInfo{})
+ if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitLabWebHookCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitLabWebHookCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitLabWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitRefInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitRefInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitRefInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GitSourceRevision) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commit = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Author.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Committer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageChangeCause) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImageID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FromRef == nil {
+ m.FromRef = &v11.ObjectReference{}
+ }
+ if err := m.FromRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageChangeTrigger) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.From == nil {
+ m.From = &v11.ObjectReference{}
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Paused = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageChangeTriggerStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageChangeTriggerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageChangeTriggerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTriggerTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTriggerTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageLabel) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, ImageSourcePath{})
+ if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PullSecret == nil {
+ m.PullSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field As", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.As = append(m.As, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageSourcePath) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourcePath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationDir = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamTagReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamTagReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamTagReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *JenkinsPipelineBuildStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.JenkinsfilePath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Jenkinsfile = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OptionalNodeSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if *m == nil {
+ *m = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ (*m)[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProxyConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.HTTPProxy = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.HTTPSProxy = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NoProxy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretBuildSource) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationDir = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretLocalReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretLocalReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretLocalReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SecretSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MountPath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SourceBuildStrategy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PullSecret == nil {
+ m.PullSecret = &v11.LocalObjectReference{}
+ }
+ if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Env = append(m.Env, v11.EnvVar{})
+ if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scripts = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Incremental = &b
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ForcePull = bool(v != 0)
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Volumes = append(m.Volumes, BuildVolume{})
+ if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SourceControlUser) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Email = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SourceRevision) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Git == nil {
+ m.Git = &GitSourceRevision{}
+ }
+ if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SourceStrategyOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SourceStrategyOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SourceStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Incremental = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StageInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StageInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StageInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = StageName(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType)
+ }
+ m.DurationMilliseconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DurationMilliseconds |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Steps = append(m.Steps, StepInfo{})
+ if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StepInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StepInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StepInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = StepName(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType)
+ }
+ m.DurationMilliseconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DurationMilliseconds |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WebHookTrigger) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowEnv = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretReference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SecretReference == nil {
+ m.SecretReference = &SecretLocalReference{}
+ }
+ if err := m.SecretReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto
new file mode 100644
index 0000000000..57b54f3923
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/generated.proto
@@ -0,0 +1,1239 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.build.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/build/v1";
+
+// BinaryBuildRequestOptions are the options required to fully speficy a binary build request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BinaryBuildRequestOptions {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+ optional string asFile = 2;
+
+ // revision.commit is the value identifying a specific commit
+ optional string revisionCommit = 3;
+
+ // revision.message is the description of a specific commit
+ optional string revisionMessage = 4;
+
+ // revision.authorName of the source control user
+ optional string revisionAuthorName = 5;
+
+ // revision.authorEmail of the source control user
+ optional string revisionAuthorEmail = 6;
+
+ // revision.committerName of the source control user
+ optional string revisionCommitterName = 7;
+
+ // revision.committerEmail of the source control user
+ optional string revisionCommitterEmail = 8;
+}
+
+// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies,
+// where the file will be extracted and used as the build source.
+message BinaryBuildSource {
+ // asFile indicates that the provided binary input should be considered a single file
+ // within the build input. For example, specifying "webapp.war" would place the provided
+ // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
+ // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
+ // The custom strategy receives this binary as standard input. This filename may not
+ // contain slashes or be '..' or '.'.
+ optional string asFile = 1;
+}
+
+// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a
+// build.
+message BitbucketWebHookCause {
+ optional CommonWebHookCause commonSpec = 1;
+}
+
+// Build encapsulates the inputs needed to produce a new deployable image, as well as
+// the status of the execution and a reference to the Pod which executed the build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Build {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is all the inputs used to execute the build.
+ optional BuildSpec spec = 2;
+
+ // status is the current status of the build.
+ // +optional
+ optional BuildStatus status = 3;
+}
+
+// BuildCondition describes the state of a build at a certain point.
+message BuildCondition {
+ // Type of build condition.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // The last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
+
+ // The last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // The reason for the condition's last transition.
+ optional string reason = 4;
+
+ // A human readable message indicating details about the transition.
+ optional string message = 5;
+}
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildConfig {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec holds all the input necessary to produce a new build, and the conditions when
+ // to trigger them.
+ optional BuildConfigSpec spec = 2;
+
+ // status holds any relevant information about a build config
+ // +optional
+ optional BuildConfigStatus status = 3;
+}
+
+// BuildConfigList is a collection of BuildConfigs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildConfigList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a list of build configs
+ repeated BuildConfig items = 2;
+}
+
+// BuildConfigSpec describes when and how builds are created
+message BuildConfigSpec {
+ // triggers determine how new Builds can be launched from a BuildConfig. If
+ // no triggers are defined, a new build can only occur as a result of an
+ // explicit client build creation.
+ // +optional
+ repeated BuildTriggerPolicy triggers = 1;
+
+ // RunPolicy describes how the new build created from this build
+ // configuration will be scheduled for execution.
+ // This is optional, if not specified we default to "Serial".
+ optional string runPolicy = 2;
+
+ // CommonSpec is the desired build specification
+ optional CommonSpec commonSpec = 3;
+
+ // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all successful builds are retained.
+ optional int32 successfulBuildsHistoryLimit = 4;
+
+ // failedBuildsHistoryLimit is the number of old failed builds to retain.
+ // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all failed builds are retained.
+ optional int32 failedBuildsHistoryLimit = 5;
+}
+
+// BuildConfigStatus contains current state of the build config object.
+message BuildConfigStatus {
+ // lastVersion is used to inform about number of last triggered build.
+ optional int64 lastVersion = 1;
+
+ // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
+ // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
+ repeated ImageChangeTriggerStatus imageChangeTriggers = 2;
+}
+
+// BuildList is a collection of Builds.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a list of builds
+ repeated Build items = 2;
+}
+
+// BuildLog is the (unused) resource associated with the build log redirector
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildLog {
+}
+
+// BuildLogOptions is the REST options for a build log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildLogOptions {
+ // cointainer for which to stream logs. Defaults to only container if there is one container in the pod.
+ optional string container = 1;
+
+ // follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ optional bool follow = 2;
+
+ // previous returns previous build logs. Defaults to false.
+ optional bool previous = 3;
+
+ // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional int64 sinceSeconds = 4;
+
+ // sinceTime is an RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
+
+ // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ optional bool timestamps = 6;
+
+ // tailLines, If set, is the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ optional int64 tailLines = 7;
+
+ // limitBytes, If set, is the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ optional int64 limitBytes = 8;
+
+ // noWait if true causes the call to return immediately even if the build
+ // is not available yet. Otherwise the server will wait until the build has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ optional bool nowait = 9;
+
+ // version of the build for which to view logs.
+ optional int64 version = 10;
+
+ // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+ // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
+ // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+ // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+ // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+ // the actual log data coming from the real kubelet).
+ // +optional
+ optional bool insecureSkipTLSVerifyBackend = 11;
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+message BuildOutput {
+ // to defines an optional location to push the output of this build to.
+ // Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+ // This value will be used to look up a container image repository to push to.
+ // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+ // the build unless Namespace is specified.
+ optional k8s.io.api.core.v1.ObjectReference to = 1;
+
+ // PushSecret is the name of a Secret that would be used for setting
+ // up the authentication for executing the Docker push to authentication
+ // enabled Docker Registry (or Docker Hub).
+ optional k8s.io.api.core.v1.LocalObjectReference pushSecret = 2;
+
+ // imageLabels define a list of labels that are applied to the resulting image. If there
+ // are multiple labels with the same name then the last one in the list is used.
+ repeated ImageLabel imageLabels = 3;
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command return a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+// "postCommit": {
+// "script": "rake test --verbose",
+// }
+//
+// The above is a convenient form which is equivalent to:
+//
+// "postCommit": {
+// "command": ["/bin/sh", "-ic"],
+// "args": ["rake test --verbose"]
+// }
+//
+// 2. A command as the image entrypoint:
+//
+// "postCommit": {
+// "commit": ["rake", "test", "--verbose"]
+// }
+//
+// Command overrides the image entrypoint in the exec form, as documented in
+// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+// "postCommit": {
+// "args": ["rake", "test", "--verbose"]
+// }
+//
+// This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+// "postCommit": {
+// "script": "rake test $1",
+// "args": ["--verbose"]
+// }
+//
+// This form is useful if you need to pass arguments that would otherwise be
+// hard to quote properly in the shell script. In the script, $0 will be
+// "/bin/sh" and $1, $2, etc, are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+// "postCommit": {
+// "command": ["rake", "test"],
+// "args": ["--verbose"]
+// }
+//
+// This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+message BuildPostCommitSpec {
+ // command is the command to run. It may not be specified with Script.
+ // This might be needed if the image doesn't have `/bin/sh`, or if you
+ // do not want to use a shell. In all other cases, using Script might be
+ // more convenient.
+ repeated string command = 1;
+
+ // args is a list of arguments that are provided to either Command,
+ // Script or the container image's default entrypoint. The arguments are
+ // placed immediately after the command to be run.
+ repeated string args = 2;
+
+ // script is a shell script to be run with `/bin/sh -ic`. It may not be
+ // specified with Command. Use Script when a shell script is appropriate
+ // to execute the post build hook, for example for running unit tests
+ // with `rake test`. If you need control over the image entrypoint, or
+ // if the image does not have `/bin/sh`, use Command and/or Args.
+ // The `-i` flag is needed to support CentOS and RHEL images that use
+ // Software Collections (SCL), in order to have the appropriate
+ // collections enabled in the shell. E.g., in the Ruby image, this is
+ // necessary to make `ruby`, `bundle` and other binaries available in
+ // the PATH.
+ optional string script = 3;
+}
+
+// BuildRequest is the resource used to pass parameters to build generator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BuildRequest {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // revision is the information from the source for a specific repo snapshot.
+ optional SourceRevision revision = 2;
+
+ // triggeredByImage is the Image that triggered this build.
+ optional k8s.io.api.core.v1.ObjectReference triggeredByImage = 3;
+
+ // from is the reference to the ImageStreamTag that triggered the build.
+ optional k8s.io.api.core.v1.ObjectReference from = 4;
+
+ // binary indicates a request to build from a binary provided to the builder
+ optional BinaryBuildSource binary = 5;
+
+ // lastVersion (optional) is the LastVersion of the BuildConfig that was used
+ // to generate the build. If the BuildConfig in the generator doesn't match, a build will
+ // not be generated.
+ optional int64 lastVersion = 6;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated k8s.io.api.core.v1.EnvVar env = 7;
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ repeated BuildTriggerCause triggeredBy = 8;
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ optional DockerStrategyOptions dockerStrategyOptions = 9;
+
+ // SourceStrategyOptions contains additional source-strategy specific options for the build
+ optional SourceStrategyOptions sourceStrategyOptions = 10;
+}
+
+// BuildSource is the SCM used for the build.
+message BuildSource {
+ // type of build input to accept
+ // +k8s:conversion-gen=false
+ // +optional
+ optional string type = 1;
+
+ // binary builds accept a binary as their input. The binary is generally assumed to be a tar,
+ // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build
+ // context and an optional Dockerfile may be specified to override any Dockerfile in the
+ // build context. For Source builds, this is assumed to be an archive as described above. For
+ // Source and container image builds, if binary.asFile is set the build will receive a directory with
+ // a single file. contextDir may be used when an archive is provided. Custom builds will
+ // receive this binary as input on STDIN.
+ optional BinaryBuildSource binary = 2;
+
+ // dockerfile is the raw contents of a Dockerfile which should be built. When this option is
+ // specified, the FROM may be modified based on your strategy base image and additional ENV
+ // stanzas from your strategy environment will be added after the FROM, but before the rest
+ // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
+ // git - in those cases the Git repo will have any innate Dockerfile replaced in the context
+ // dir.
+ optional string dockerfile = 3;
+
+ // git contains optional information about git build source
+ optional GitBuildSource git = 4;
+
+ // images describes a set of images to be used to provide source for the build
+ repeated ImageSource images = 5;
+
+ // contextDir specifies the sub-directory where the source code for the application exists.
+ // This allows to have buildable sources in directory other than root of
+ // repository.
+ optional string contextDir = 6;
+
+ // sourceSecret is the name of a Secret that would be used for setting
+ // up the authentication for cloning private repository.
+ // The secret contains valid credentials for remote repository, where the
+ // data's key represent the authentication method to be used and value is
+ // the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
+ optional k8s.io.api.core.v1.LocalObjectReference sourceSecret = 7;
+
+ // secrets represents a list of secrets and their destinations that will
+ // be used only for the build.
+ repeated SecretBuildSource secrets = 8;
+
+ // configMaps represents a list of configMaps and their destinations that will
+ // be used for the build.
+ repeated ConfigMapBuildSource configMaps = 9;
+}
+
+// BuildSpec has the information to represent a build and also additional
+// information about a build
+message BuildSpec {
+ // CommonSpec is the information that represents a build
+ optional CommonSpec commonSpec = 1;
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ repeated BuildTriggerCause triggeredBy = 2;
+}
+
+// BuildStatus contains the status of a build
+message BuildStatus {
+ // phase is the point in the build lifecycle. Possible values are
+ // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled".
+ optional string phase = 1;
+
+ // cancelled describes if a cancel event was triggered for the build.
+ optional bool cancelled = 2;
+
+ // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
+ optional string reason = 3;
+
+ // message is a human-readable message indicating details about why the build has this status.
+ optional string message = 4;
+
+ // startTimestamp is a timestamp representing the server time when this Build started
+ // running in a Pod.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5;
+
+ // completionTimestamp is a timestamp representing the server time when this Build was
+ // finished, whether that build failed or succeeded. It reflects the time at which
+ // the Pod running the Build terminated.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6;
+
+ // duration contains time.Duration object describing build time.
+ optional int64 duration = 7;
+
+ // outputDockerImageReference contains a reference to the container image that
+ // will be built by this build. Its value is computed from
+ // Build.Spec.Output.To, and should include the registry address, so that
+ // it can be used to push and pull the image.
+ optional string outputDockerImageReference = 8;
+
+ // config is an ObjectReference to the BuildConfig this Build is based on.
+ optional k8s.io.api.core.v1.ObjectReference config = 9;
+
+ // output describes the container image the build has produced.
+ optional BuildStatusOutput output = 10;
+
+ // stages contains details about each stage that occurs during the build
+ // including start time, duration (in milliseconds), and the steps that
+ // occurred within each stage.
+ repeated StageInfo stages = 11;
+
+ // logSnippet is the last few lines of the build log. This value is only set for builds that failed.
+ optional string logSnippet = 12;
+
+ // Conditions represents the latest available observations of a build's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated BuildCondition conditions = 13;
+}
+
+// BuildStatusOutput contains the status of the built image.
+message BuildStatusOutput {
+ // to describes the status of the built image being pushed to a registry.
+ optional BuildStatusOutputTo to = 1;
+}
+
+// BuildStatusOutputTo describes the status of the built image with regards to
+// image registry to which it was supposed to be pushed.
+message BuildStatusOutputTo {
+ // imageDigest is the digest of the built container image. The digest uniquely
+ // identifies the image in the registry to which it was pushed.
+ //
+ // Please note that this field may not always be set even if the push
+ // completes successfully - e.g. when the registry returns no digest or
+ // returns it in a format that the builder doesn't understand.
+ optional string imageDigest = 1;
+}
+
+// BuildStrategy contains the details of how to perform a build.
+message BuildStrategy {
+ // type is the kind of build strategy.
+ // +k8s:conversion-gen=false
+ // +optional
+ optional string type = 1;
+
+ // dockerStrategy holds the parameters to the container image build strategy.
+ optional DockerBuildStrategy dockerStrategy = 2;
+
+ // sourceStrategy holds the parameters to the Source build strategy.
+ optional SourceBuildStrategy sourceStrategy = 3;
+
+ // customStrategy holds the parameters to the Custom build strategy
+ optional CustomBuildStrategy customStrategy = 4;
+
+ // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // Deprecated: use OpenShift Pipelines
+ optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5;
+}
+
+// BuildTriggerCause holds information about a triggered build. It is used for
+// displaying build trigger data for each build and build configuration in oc
+// describe. It is also used to describe which triggers led to the most recent
+// update in the build configuration.
+message BuildTriggerCause {
+ // message is used to store a human readable message for why the build was
+ // triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
+ optional string message = 1;
+
+ // genericWebHook holds data about a build's generic webhook trigger.
+ optional GenericWebHookCause genericWebHook = 2;
+
+ // gitHubWebHook represents data for a GitHub webhook that fired a
+ // specific build.
+ optional GitHubWebHookCause githubWebHook = 3;
+
+ // imageChangeBuild stores information about an imagechange event
+ // that triggered a new build.
+ optional ImageChangeCause imageChangeBuild = 4;
+
+ // GitLabWebHook represents data for a GitLab webhook that fired a specific
+ // build.
+ optional GitLabWebHookCause gitlabWebHook = 5;
+
+ // BitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // specific build.
+ optional BitbucketWebHookCause bitbucketWebHook = 6;
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+message BuildTriggerPolicy {
+ // type is the type of build trigger. Valid values:
+ //
+ // - GitHub
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ //
+ // - Generic
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ //
+ // - GitLab
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ //
+ // - Bitbucket
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ //
+ // - ImageChange
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ //
+ // - ConfigChange
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ optional string type = 1;
+
+ // github contains the parameters for a GitHub webhook type of trigger
+ optional WebHookTrigger github = 2;
+
+ // generic contains the parameters for a Generic webhook type of trigger
+ optional WebHookTrigger generic = 3;
+
+ // imageChange contains parameters for an ImageChange type of trigger
+ optional ImageChangeTrigger imageChange = 4;
+
+ // GitLabWebHook contains the parameters for a GitLab webhook type of trigger
+ optional WebHookTrigger gitlab = 5;
+
+ // BitbucketWebHook contains the parameters for a Bitbucket webhook type of
+ // trigger
+ optional WebHookTrigger bitbucket = 6;
+}
+
+// BuildVolume describes a volume that is made available to build pods,
+// such that it can be mounted into buildah's runtime environment.
+// Only a subset of Kubernetes Volume sources are supported.
+message BuildVolume {
+ // name is a unique identifier for this BuildVolume.
+ // It must conform to the Kubernetes DNS label standard and be unique within the pod.
+ // Names that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which name caused the error.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // +required
+ optional string name = 1;
+
+ // source represents the location and type of the mounted volume.
+ // +required
+ optional BuildVolumeSource source = 2;
+
+ // mounts represents the location of the volume in the image build container
+ // +required
+ // +listType=map
+ // +listMapKey=destinationPath
+ // +patchMergeKey=destinationPath
+ // +patchStrategy=merge
+ repeated BuildVolumeMount mounts = 3;
+}
+
+// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.
+message BuildVolumeMount {
+ // destinationPath is the path within the buildah runtime environment at which the volume should be mounted.
+ // The transient mount within the build image and the backing volume will both be mounted read only.
+ // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated
+ // by the builder process
+ // Paths that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which path caused the error.
+ optional string destinationPath = 1;
+}
+
+// BuildVolumeSource represents the source of a volume to mount
+// Only one of its supported types may be specified at any given time.
+message BuildVolumeSource {
+ // type is the BuildVolumeSourceType for the volume source.
+ // Type must match the populated volume source.
+ // Valid types are: Secret, ConfigMap
+ optional string type = 1;
+
+ // secret represents a Secret that should populate this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ // +optional
+ optional k8s.io.api.core.v1.SecretVolumeSource secret = 2;
+
+ // configMap represents a ConfigMap that should populate this volume
+ // +optional
+ optional k8s.io.api.core.v1.ConfigMapVolumeSource configMap = 3;
+
+ // csi represents ephemeral storage provided by external CSI drivers which support this capability
+ // +optional
+ optional k8s.io.api.core.v1.CSIVolumeSource csi = 4;
+}
+
+// CommonSpec encapsulates all the inputs necessary to represent a build.
+message CommonSpec {
+ // serviceAccount is the name of the ServiceAccount to use to run the pod
+ // created by this build.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ optional string serviceAccount = 1;
+
+ // source describes the SCM in use.
+ optional BuildSource source = 2;
+
+ // revision is the information from the source for a specific repo snapshot.
+ // This is optional.
+ optional SourceRevision revision = 3;
+
+ // strategy defines how to perform a build.
+ optional BuildStrategy strategy = 4;
+
+ // output describes the container image the Strategy should produce.
+ optional BuildOutput output = 5;
+
+ // resources computes resource requirements to execute the build.
+ optional k8s.io.api.core.v1.ResourceRequirements resources = 6;
+
+ // postCommit is a build hook executed after the build output image is
+ // committed, before it is pushed to a registry.
+ optional BuildPostCommitSpec postCommit = 7;
+
+ // completionDeadlineSeconds is an optional duration in seconds, counted from
+ // the time when a build pod gets scheduled in the system, that the build may
+ // be active on a node before the system actively tries to terminate the
+ // build; value must be positive integer
+ optional int64 completionDeadlineSeconds = 8;
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ // If nil, it can be overridden by default build nodeselector values for the cluster.
+ // If set to an empty map or a map with any values, default build nodeselector values
+ // are ignored.
+ // +optional
+ optional OptionalNodeSelector nodeSelector = 9;
+
+ // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+ // the cluster's proxy configuration, into the build. This lets processes within a build trust
+ // components signed by custom PKI certificate authorities, such as private artifact
+ // repositories and HTTPS proxies.
+ //
+ // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+ // managed by the build container, and any changes to this directory or its subdirectories (for
+ // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+ optional bool mountTrustedCA = 10;
+}
+
+// CommonWebHookCause factors out the identical format of these webhook
+// causes into struct so we can share it in the specific causes; it is too late for
+// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
+message CommonWebHookCause {
+ // Revision is the git source revision information of the trigger.
+ optional SourceRevision revision = 1;
+
+ // Secret is the obfuscated webhook secret that triggered a build.
+ optional string secret = 2;
+}
+
+// ConfigMapBuildSource describes a configmap and its destination directory that will be
+// used only at the build time. The content of the configmap referenced here will
+// be copied into the destination directory instead of mounting.
+message ConfigMapBuildSource {
+ // configMap is a reference to an existing configmap that you want to use in your
+ // build.
+ optional k8s.io.api.core.v1.LocalObjectReference configMap = 1;
+
+ // destinationDir is the directory where the files from the configmap should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ optional string destinationDir = 2;
+}
+
+// CustomBuildStrategy defines input parameters specific to Custom build.
+message CustomBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated k8s.io.api.core.v1.EnvVar env = 3;
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ optional bool exposeDockerSocket = 4;
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if it is not present locally
+ optional bool forcePull = 5;
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ repeated SecretSpec secrets = 6;
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ optional string buildAPIVersion = 7;
+}
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+message DockerBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ optional bool noCache = 3;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated k8s.io.api.core.v1.EnvVar env = 4;
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ optional bool forcePull = 5;
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ // Defaults to `Dockerfile` if unset.
+ optional string dockerfilePath = 6;
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+ // are ignored.
+ repeated k8s.io.api.core.v1.EnvVar buildArgs = 7;
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
+ optional string imageOptimizationPolicy = 8;
+
+ // volumes is a list of input volumes that can be mounted into the builds runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated BuildVolume volumes = 9;
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+message DockerStrategyOptions {
+ // Args contains any build arguments that are to be passed to Docker. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details
+ repeated k8s.io.api.core.v1.EnvVar buildArgs = 1;
+
+ // noCache overrides the docker-strategy noCache option in the build config
+ optional bool noCache = 2;
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+message GenericWebHookCause {
+ // revision is an optional field that stores the git source revision
+ // information of the generic webhook trigger when it is available.
+ optional SourceRevision revision = 1;
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ optional string secret = 2;
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+message GenericWebHookEvent {
+ // type is the type of source repository
+ // +k8s:conversion-gen=false
+ optional string type = 1;
+
+ // git is the git information if the Type is BuildSourceGit
+ optional GitInfo git = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ // ValueFrom is not supported.
+ repeated k8s.io.api.core.v1.EnvVar env = 3;
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ optional DockerStrategyOptions dockerStrategyOptions = 4;
+}
+
+// GitBuildSource defines the parameters of a Git SCM
+message GitBuildSource {
+ // uri points to the source that will be built. The structure of the source
+ // will depend on the type of build to run
+ optional string uri = 1;
+
+ // ref is the branch/tag/ref to build.
+ optional string ref = 2;
+
+ // proxyConfig defines the proxies to use for the git clone operation. Values
+ // not set here are inherited from cluster-wide build git proxy settings.
+ optional ProxyConfig proxyConfig = 3;
+}
+
+// GitHubWebHookCause has information about a GitHub webhook that triggered a
+// build.
+message GitHubWebHookCause {
+ // revision is the git revision information of the trigger.
+ optional SourceRevision revision = 1;
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ optional string secret = 2;
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+message GitInfo {
+ optional GitBuildSource gitBuildSource = 1;
+
+ optional GitSourceRevision gitSourceRevision = 2;
+
+ // Refs is a list of GitRefs for the provided repo - generally sent
+ // when used from a post-receive hook. This field is optional and is
+ // used when sending multiple refs
+ repeated GitRefInfo refs = 3;
+}
+
+// GitLabWebHookCause has information about a GitLab webhook that triggered a
+// build.
+message GitLabWebHookCause {
+ optional CommonWebHookCause commonSpec = 1;
+}
+
+// GitRefInfo is a single ref
+message GitRefInfo {
+ optional GitBuildSource gitBuildSource = 1;
+
+ optional GitSourceRevision gitSourceRevision = 2;
+}
+
+// GitSourceRevision is the commit information from a git source for a build
+message GitSourceRevision {
+ // commit is the commit hash identifying a specific commit
+ optional string commit = 1;
+
+ // author is the author of a specific commit
+ optional SourceControlUser author = 2;
+
+ // committer is the committer of a specific commit
+ optional SourceControlUser committer = 3;
+
+ // message is the description of a specific commit
+ optional string message = 4;
+}
+
+// ImageChangeCause contains information about the image that triggered a
+// build
+message ImageChangeCause {
+ // imageID is the ID of the image that triggered a new build.
+ optional string imageID = 1;
+
+ // fromRef contains detailed information about an image that triggered a
+ // build.
+ optional k8s.io.api.core.v1.ObjectReference fromRef = 2;
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+message ImageChangeTrigger {
+ // lastTriggeredImageID is used internally by the ImageChangeController to save last
+ // used image ID for build
+ // This field is deprecated and will be removed in a future release.
+ // Deprecated
+ optional string lastTriggeredImageID = 1;
+
+ // from is a reference to an ImageStreamTag that will trigger a build when updated
+ // It is optional. If no From is specified, the From image from the build strategy
+ // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+ // a build configuration.
+ optional k8s.io.api.core.v1.ObjectReference from = 2;
+
+ // paused is true if this trigger is temporarily disabled. Optional.
+ optional bool paused = 3;
+}
+
+// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy
+// specified in the BuildConfigSpec.Triggers struct.
+message ImageChangeTriggerStatus {
+ // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started.
+ // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.
+ optional string lastTriggeredImageID = 1;
+
+ // from is the ImageStreamTag that is the source of the trigger.
+ optional ImageStreamTagReference from = 2;
+
+ // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start.
+ // This field is only updated when this trigger specifically started a Build.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTriggerTime = 3;
+}
+
+// ImageLabel represents a label applied to the resulting image.
+message ImageLabel {
+ // name defines the name of the label. It must have non-zero length.
+ optional string name = 1;
+
+ // value defines the literal value of the label.
+ optional string value = 2;
+}
+
+// ImageSource is used to describe build source that will be extracted from an image or used during a
+// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used.
+// A pull secret can be specified to pull the image from an external registry or override the default
+// service account secret if pulling from the internal registry. Image sources can either be used to
+// extract content from an image and place it into the build context along with the repository source,
+// or used directly during a multi-stage container image build to allow content to be copied without overwriting
+// the contents of the repository source (see the 'paths' and 'as' fields).
+message ImageSource {
+ // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
+ // copy source from.
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // A list of image names that this source will be used in place of during a multi-stage container image
+ // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image
+ // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile
+ // does not reference an image source it is ignored. This field and paths may both be set, in which case
+ // the contents will be used twice.
+ // +optional
+ repeated string as = 4;
+
+ // paths is a list of source and destination paths to copy from the image. This content will be copied
+ // into the build context prior to starting the build. If no paths are set, the build context will
+ // not be altered.
+ // +optional
+ repeated ImageSourcePath paths = 2;
+
+ // pullSecret is a reference to a secret to be used to pull the image from a registry
+ // If the image is pulled from the OpenShift registry, this field does not need to be set.
+ optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 3;
+}
+
+// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
+message ImageSourcePath {
+ // sourcePath is the absolute path of the file or directory inside the image to
+ // copy to the build directory. If the source path ends in /. then the content of
+ // the directory will be copied, but the directory itself will not be created at the
+ // destination.
+ optional string sourcePath = 1;
+
+ // destinationDir is the relative directory within the build directory
+ // where files copied from the image are placed.
+ optional string destinationDir = 2;
+}
+
+// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+message ImageStreamTagReference {
+ // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+ optional string namespace = 1;
+
+ // name is the name of the ImageStreamTag for an ImageChangeTrigger
+ optional string name = 2;
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+message JenkinsPipelineBuildStrategy {
+ // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If JenkinsfilePath & Jenkinsfile are
+ // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ optional string jenkinsfilePath = 1;
+
+ // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ optional string jenkinsfile = 2;
+
+ // env contains additional environment variables you want to pass into a build pipeline.
+ repeated k8s.io.api.core.v1.EnvVar env = 3;
+}
+
+// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalNodeSelector {
+ // items, if empty, will result in an empty map
+
+ map<string, string> items = 1;
+}
+
+// ProxyConfig defines what proxies to use for an operation
+message ProxyConfig {
+ // httpProxy is a proxy used to reach the git repository over http
+ optional string httpProxy = 3;
+
+ // httpsProxy is a proxy used to reach the git repository over https
+ optional string httpsProxy = 4;
+
+ // noProxy is the list of domains for which the proxy should not be used
+ optional string noProxy = 5;
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at the build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+message SecretBuildSource {
+ // secret is a reference to an existing secret that you want to use in your
+ // build.
+ optional k8s.io.api.core.v1.LocalObjectReference secret = 1;
+
+ // destinationDir is the directory where the files from the secret should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs. Later, when the script finishes, all files
+ // injected will be truncated to zero length.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ optional string destinationDir = 2;
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+message SecretLocalReference {
+ // Name is the name of the resource in the same namespace being referenced
+ optional string name = 1;
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+message SecretSpec {
+ // secretSource is a reference to the secret
+ optional k8s.io.api.core.v1.LocalObjectReference secretSource = 1;
+
+ // mountPath is the path at which to mount the secret
+ optional string mountPath = 2;
+}
+
+// SourceBuildStrategy defines input parameters specific to an Source build.
+message SourceBuildStrategy {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+ // env contains additional environment variables you want to pass into a builder container.
+ repeated k8s.io.api.core.v1.EnvVar env = 3;
+
+ // scripts is the location of Source scripts
+ optional string scripts = 4;
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ optional bool incremental = 5;
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ optional bool forcePull = 6;
+
+ // volumes is a list of input volumes that can be mounted into the builds runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated BuildVolume volumes = 9;
+}
+
+// SourceControlUser defines the identity of a user of source control
+message SourceControlUser {
+ // name of the source control user
+ optional string name = 1;
+
+ // email of the source control user
+ optional string email = 2;
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+message SourceRevision {
+ // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'
+ // +k8s:conversion-gen=false
+ optional string type = 1;
+
+ // Git contains information about git-based build source
+ optional GitSourceRevision git = 2;
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+message SourceStrategyOptions {
+ // incremental overrides the source-strategy incremental option in the build config
+ optional bool incremental = 1;
+}
+
+// StageInfo contains details about a build stage.
+message StageInfo {
+ // name is a unique identifier for each build stage that occurs.
+ optional string name = 1;
+
+ // startTime is a timestamp representing the server time when this Stage started.
+ // It is represented in RFC3339 form and is in UTC.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+ // durationMilliseconds identifies how long the stage took
+ // to complete in milliseconds.
+ // Note: the duration of a stage can exceed the sum of the duration of the steps within
+ // the stage as not all actions are accounted for in explicit build steps.
+ optional int64 durationMilliseconds = 3;
+
+ // steps contains details about each step that occurs during a build stage
+ // including start time and duration in milliseconds.
+ repeated StepInfo steps = 4;
+}
+
+// StepInfo contains details about a build step.
+message StepInfo {
+ // name is a unique identifier for each build step.
+ optional string name = 1;
+
+ // startTime is a timestamp representing the server time when this Step started.
+ // it is represented in RFC3339 form and is in UTC.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+ // durationMilliseconds identifies how long the step took
+ // to complete in milliseconds.
+ optional int64 durationMilliseconds = 3;
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+message WebHookTrigger {
+ // secret used to validate requests.
+ // Deprecated: use SecretReference instead.
+ optional string secret = 1;
+
+ // allowEnv determines whether the webhook can set environment variables; can only
+ // be set to true for GenericWebHook.
+ optional bool allowEnv = 2;
+
+ // secretReference is a reference to a secret in the same namespace,
+ // containing the value to be validated when the webhook is invoked.
+ // The secret being referenced must contain a key named "WebHookSecretKey", the value
+ // of which will be checked against the value supplied in the webhook invocation.
+ optional SecretLocalReference secretReference = 3;
+}
+
diff --git a/vendor/github.com/openshift/api/build/v1/legacy.go b/vendor/github.com/openshift/api/build/v1/legacy.go
new file mode 100644
index 0000000000..a74627d2cd
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/legacy.go
@@ -0,0 +1,28 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &Build{},
+ &BuildList{},
+ &BuildConfig{},
+ &BuildConfigList{},
+ &BuildLog{},
+ &BuildRequest{},
+ &BuildLogOptions{},
+ &BinaryBuildRequestOptions{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/build/v1/register.go b/vendor/github.com/openshift/api/build/v1/register.go
new file mode 100644
index 0000000000..16f68ea8cd
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/register.go
@@ -0,0 +1,47 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "build.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// addKnownTypes adds types to API group
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Build{},
+ &BuildList{},
+ &BuildConfig{},
+ &BuildConfigList{},
+ &BuildLog{},
+ &BuildRequest{},
+ &BuildLogOptions{},
+ &BinaryBuildRequestOptions{},
+ // This is needed for webhooks
+ &corev1.PodProxyOptions{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go
new file mode 100644
index 0000000000..ba836aad81
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/types.go
@@ -0,0 +1,1469 @@
+package v1
+
+import (
+ "fmt"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:method=UpdateDetails,verb=update,subresource=details
+// +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build encapsulates the inputs needed to produce a new deployable image, as well as
+// the status of the execution and a reference to the Pod which executed the build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Build struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is all the inputs used to execute the build.
+ Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // status is the current status of the build.
+ // +optional
+ Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildSpec has the information to represent a build and also additional
+// information about a build
+type BuildSpec struct {
+ // CommonSpec is the information that represents a build
+ CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,2,rep,name=triggeredBy"`
+}
+
+// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalNodeSelector map[string]string
+
+func (t OptionalNodeSelector) String() string {
+ return fmt.Sprintf("%v", map[string]string(t))
+}
+
+// CommonSpec encapsulates all the inputs necessary to represent a build.
+type CommonSpec struct {
+ // serviceAccount is the name of the ServiceAccount to use to run the pod
+ // created by this build.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"`
+
+ // source describes the SCM in use.
+ Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"`
+
+ // revision is the information from the source for a specific repo snapshot.
+ // This is optional.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"`
+
+ // strategy defines how to perform a build.
+ Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"`
+
+ // output describes the container image the Strategy should produce.
+ Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"`
+
+ // resources computes resource requirements to execute the build.
+ Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"`
+
+ // postCommit is a build hook executed after the build output image is
+ // committed, before it is pushed to a registry.
+ PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"`
+
+ // completionDeadlineSeconds is an optional duration in seconds, counted from
+ // the time when a build pod gets scheduled in the system, that the build may
+ // be active on a node before the system actively tries to terminate the
+ // build; value must be positive integer
+ CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ // If nil, it can be overridden by default build nodeselector values for the cluster.
+ // If set to an empty map or a map with any values, default build nodeselector values
+ // are ignored.
+ // +optional
+ NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"`
+
+ // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+ // the cluster's proxy configuration, into the build. This lets processes within a build trust
+ // components signed by custom PKI certificate authorities, such as private artifact
+ // repositories and HTTPS proxies.
+ //
+ // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+ // managed by the build container, and any changes to this directory or its subdirectories (for
+ // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+ MountTrustedCA *bool `json:"mountTrustedCA,omitempty" protobuf:"varint,10,opt,name=mountTrustedCA"`
+}
+
+// BuildTriggerCause holds information about a triggered build. It is used for
+// displaying build trigger data for each build and build configuration in oc
+// describe. It is also used to describe which triggers led to the most recent
+// update in the build configuration.
+type BuildTriggerCause struct {
+ // message is used to store a human readable message for why the build was
+	// triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
+ Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+
+ // genericWebHook holds data about a builds generic webhook trigger.
+ GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
+
+ // gitHubWebHook represents data for a GitHub webhook that fired a
+	// specific build.
+ GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
+
+ // imageChangeBuild stores information about an imagechange event
+ // that triggered a new build.
+ ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
+
+ // GitLabWebHook represents data for a GitLab webhook that fired a specific
+ // build.
+ GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"`
+
+ // BitbucketWebHook represents data for a Bitbucket webhook that fired a
+ // specific build.
+ BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"`
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+type GenericWebHookCause struct {
+ // revision is an optional field that stores the git source revision
+ // information of the generic webhook trigger when it is available.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// GitHubWebHookCause has information about a GitHub webhook that triggered a
+// build.
+type GitHubWebHookCause struct {
+ // revision is the git revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// CommonWebHookCause factors out the identical format of these webhook
+// causes into struct so we can share it in the specific causes; it is too late for
+// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.
+type CommonWebHookCause struct {
+ // Revision is the git source revision information of the trigger.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
+
+ // Secret is the obfuscated webhook secret that triggered a build.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+}
+
+// GitLabWebHookCause has information about a GitLab webhook that triggered a
+// build.
+type GitLabWebHookCause struct {
+ CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+}
+
+// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a
+// build.
+type BitbucketWebHookCause struct {
+ CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
+}
+
+// ImageChangeCause contains information about the image that triggered a
+// build
+type ImageChangeCause struct {
+ // imageID is the ID of the image that triggered a new build.
+ ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"`
+
+ // fromRef contains detailed information about an image that triggered a
+ // build.
+ FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"`
+}
+
+// BuildStatus contains the status of a build
+type BuildStatus struct {
+ // phase is the point in the build lifecycle. Possible values are
+ // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled".
+ Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"`
+
+ // cancelled describes if a cancel event was triggered for the build.
+ Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"`
+
+ // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
+ Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"`
+
+ // message is a human-readable message indicating details about why the build has this status.
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+ // startTimestamp is a timestamp representing the server time when this Build started
+ // running in a Pod.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"`
+
+ // completionTimestamp is a timestamp representing the server time when this Build was
+ // finished, whether that build failed or succeeded. It reflects the time at which
+ // the Pod running the Build terminated.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"`
+
+ // duration contains time.Duration object describing build time.
+ Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"`
+
+ // outputDockerImageReference contains a reference to the container image that
+ // will be built by this build. Its value is computed from
+ // Build.Spec.Output.To, and should include the registry address, so that
+ // it can be used to push and pull the image.
+ OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"`
+
+ // config is an ObjectReference to the BuildConfig this Build is based on.
+ Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"`
+
+ // output describes the container image the build has produced.
+ Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"`
+
+ // stages contains details about each stage that occurs during the build
+ // including start time, duration (in milliseconds), and the steps that
+	// occurred within each stage.
+ Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"`
+
+ // logSnippet is the last few lines of the build log. This value is only set for builds that failed.
+ LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"`
+
+ // Conditions represents the latest available observations of a build's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"`
+}
+
+// StageInfo contains details about a build stage.
+type StageInfo struct {
+ // name is a unique identifier for each build stage that occurs.
+ Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Stage started.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the stage took
+ // to complete in milliseconds.
+ // Note: the duration of a stage can exceed the sum of the duration of the steps within
+ // the stage as not all actions are accounted for in explicit build steps.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+
+ // steps contains details about each step that occurs during a build stage
+ // including start time and duration in milliseconds.
+ Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"`
+}
+
+// StageName is the unique identifier for each build stage.
+type StageName string
+
+// Valid values for StageName
+const (
+ // StageFetchInputs fetches any inputs such as source code.
+ StageFetchInputs StageName = "FetchInputs"
+
+ // StagePullImages pulls any images that are needed such as
+ // base images or input images.
+ StagePullImages StageName = "PullImages"
+
+ // StageBuild performs the steps necessary to build the image.
+ StageBuild StageName = "Build"
+
+ // StagePostCommit executes any post commit steps.
+ StagePostCommit StageName = "PostCommit"
+
+ // StagePushImage pushes the image to the node.
+ StagePushImage StageName = "PushImage"
+)
+
+// StepInfo contains details about a build step.
+type StepInfo struct {
+ // name is a unique identifier for each build step.
+ Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // startTime is a timestamp representing the server time when this Step started.
+ // it is represented in RFC3339 form and is in UTC.
+ StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+ // durationMilliseconds identifies how long the step took
+ // to complete in milliseconds.
+ DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+}
+
+// StepName is a unique identifier for each build step.
+type StepName string
+
+// Valid values for StepName
+const (
+ // StepExecPostCommitHook executes the buildconfigs post commit hook.
+ StepExecPostCommitHook StepName = "RunPostCommitHook"
+
+ // StepFetchGitSource fetches source code for the build.
+ StepFetchGitSource StepName = "FetchGitSource"
+
+ // StepPullBaseImage pulls a base image for the build.
+ StepPullBaseImage StepName = "PullBaseImage"
+
+ // StepPullInputImage pulls an input image for the build.
+ StepPullInputImage StepName = "PullInputImage"
+
+ // StepPushImage pushes an image to the registry.
+ StepPushImage StepName = "PushImage"
+
+ // StepPushDockerImage pushes a container image to the registry.
+ StepPushDockerImage StepName = "PushDockerImage"
+
+	// StepDockerBuild performs the container image build.
+ StepDockerBuild StepName = "DockerBuild"
+)
+
+// BuildPhase represents the status of a build at a point in time.
+type BuildPhase string
+
+// Valid values for BuildPhase.
+const (
+ // BuildPhaseNew is automatically assigned to a newly created build.
+ BuildPhaseNew BuildPhase = "New"
+
+ // BuildPhasePending indicates that a pod name has been assigned and a build is
+ // about to start running.
+ BuildPhasePending BuildPhase = "Pending"
+
+ // BuildPhaseRunning indicates that a pod has been created and a build is running.
+ BuildPhaseRunning BuildPhase = "Running"
+
+ // BuildPhaseComplete indicates that a build has been successful.
+ BuildPhaseComplete BuildPhase = "Complete"
+
+ // BuildPhaseFailed indicates that a build has executed and failed.
+ BuildPhaseFailed BuildPhase = "Failed"
+
+ // BuildPhaseError indicates that an error prevented the build from executing.
+ BuildPhaseError BuildPhase = "Error"
+
+ // BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
+ BuildPhaseCancelled BuildPhase = "Cancelled"
+)
+
+type BuildConditionType string
+
+// BuildCondition describes the state of a build at a certain point.
+type BuildCondition struct {
+ // Type of build condition.
+ Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// StatusReason is a brief CamelCase string that describes a temporary or
+// permanent build error condition, meant for machine parsing and tidy display
+// in the CLI.
+type StatusReason string
+
+// BuildStatusOutput contains the status of the built image.
+type BuildStatusOutput struct {
+ // to describes the status of the built image being pushed to a registry.
+ To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+}
+
+// BuildStatusOutputTo describes the status of the built image with regards to
+// image registry to which it was supposed to be pushed.
+type BuildStatusOutputTo struct {
+ // imageDigest is the digest of the built container image. The digest uniquely
+ // identifies the image in the registry to which it was pushed.
+ //
+ // Please note that this field may not always be set even if the push
+ // completes successfully - e.g. when the registry returns no digest or
+ // returns it in a format that the builder doesn't understand.
+ ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"`
+}
+
+// BuildSourceType is the type of SCM used.
+type BuildSourceType string
+
+// Valid values for BuildSourceType.
+const (
+	// BuildSourceGit instructs a build to use a Git source control repository as the build input.
+ BuildSourceGit BuildSourceType = "Git"
+ // BuildSourceDockerfile uses a Dockerfile as the start of a build
+ BuildSourceDockerfile BuildSourceType = "Dockerfile"
+ // BuildSourceBinary indicates the build will accept a Binary file as input.
+ BuildSourceBinary BuildSourceType = "Binary"
+ // BuildSourceImage indicates the build will accept an image as input
+ BuildSourceImage BuildSourceType = "Image"
+ // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies)
+ BuildSourceNone BuildSourceType = "None"
+)
+
+// BuildSource is the SCM used for the build.
+type BuildSource struct {
+ // type of build input to accept
+ // +k8s:conversion-gen=false
+ // +optional
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // binary builds accept a binary as their input. The binary is generally assumed to be a tar,
+ // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build
+ // context and an optional Dockerfile may be specified to override any Dockerfile in the
+ // build context. For Source builds, this is assumed to be an archive as described above. For
+ // Source and container image builds, if binary.asFile is set the build will receive a directory with
+ // a single file. contextDir may be used when an archive is provided. Custom builds will
+ // receive this binary as input on STDIN.
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"`
+
+ // dockerfile is the raw contents of a Dockerfile which should be built. When this option is
+ // specified, the FROM may be modified based on your strategy base image and additional ENV
+ // stanzas from your strategy environment will be added after the FROM, but before the rest
+ // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
+ // git - in those cases the Git repo will have any innate Dockerfile replaced in the context
+ // dir.
+ Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"`
+
+ // git contains optional information about git build source
+ Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"`
+
+ // images describes a set of images to be used to provide source for the build
+ Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"`
+
+ // contextDir specifies the sub-directory where the source code for the application exists.
+ // This allows to have buildable sources in directory other than root of
+ // repository.
+ ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"`
+
+ // sourceSecret is the name of a Secret that would be used for setting
+ // up the authentication for cloning private repository.
+ // The secret contains valid credentials for remote repository, where the
+ // data's key represent the authentication method to be used and value is
+ // the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
+ SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"`
+
+ // secrets represents a list of secrets and their destinations that will
+ // be used only for the build.
+ Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"`
+
+ // configMaps represents a list of configMaps and their destinations that will
+ // be used for the build.
+ ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"`
+}
+
+// ImageSource is used to describe build source that will be extracted from an image or used during a
+// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used.
+// A pull secret can be specified to pull the image from an external registry or override the default
+// service account secret if pulling from the internal registry. Image sources can either be used to
+// extract content from an image and place it into the build context along with the repository source,
+// or used directly during a multi-stage container image build to allow content to be copied without overwriting
+// the contents of the repository source (see the 'paths' and 'as' fields).
+type ImageSource struct {
+ // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
+ // copy source from.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // A list of image names that this source will be used in place of during a multi-stage container image
+ // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image
+ // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile
+ // does not reference an image source it is ignored. This field and paths may both be set, in which case
+ // the contents will be used twice.
+ // +optional
+ As []string `json:"as,omitempty" protobuf:"bytes,4,rep,name=as"`
+
+ // paths is a list of source and destination paths to copy from the image. This content will be copied
+ // into the build context prior to starting the build. If no paths are set, the build context will
+ // not be altered.
+ // +optional
+ Paths []ImageSourcePath `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"`
+
+ // pullSecret is a reference to a secret to be used to pull the image from a registry
+ // If the image is pulled from the OpenShift registry, this field does not need to be set.
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"`
+}
+
+// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
+type ImageSourcePath struct {
+ // sourcePath is the absolute path of the file or directory inside the image to
+ // copy to the build directory. If the source path ends in /. then the content of
+ // the directory will be copied, but the directory itself will not be created at the
+ // destination.
+ SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"`
+
+ // destinationDir is the relative directory within the build directory
+ // where files copied from the image are placed.
+ DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at the build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+type SecretBuildSource struct {
+ // secret is a reference to an existing secret that you want to use in your
+ // build.
+ Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"`
+
+ // destinationDir is the directory where the files from the secret should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs. Later, when the script finishes, all files
+ // injected will be truncated to zero length.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// ConfigMapBuildSource describes a configmap and its destination directory that will be
+// used only at the build time. The content of the configmap referenced here will
+// be copied into the destination directory instead of mounting.
+type ConfigMapBuildSource struct {
+ // configMap is a reference to an existing configmap that you want to use in your
+ // build.
+ ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"`
+
+ // destinationDir is the directory where the files from the configmap should be
+ // available for the build time.
+ // For the Source build strategy, these will be injected into a container
+ // where the assemble script runs.
+ // For the container image build strategy, these will be copied into the build
+ // directory, where the Dockerfile is located, so users can ADD or COPY them
+ // during container image build.
+ DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
+}
+
+// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies,
+// where the file will be extracted and used as the build source.
+type BinaryBuildSource struct {
+ // asFile indicates that the provided binary input should be considered a single file
+ // within the build input. For example, specifying "webapp.war" would place the provided
+ // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
+ // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
+ // The custom strategy receives this binary as standard input. This filename may not
+ // contain slashes or be '..' or '.'.
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"`
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+type SourceRevision struct {
+ // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // Git contains information about git-based build source
+ Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+}
+
+// GitSourceRevision is the commit information from a git source for a build
+type GitSourceRevision struct {
+ // commit is the commit hash identifying a specific commit
+ Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"`
+
+ // author is the author of a specific commit
+ Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"`
+
+ // committer is the committer of a specific commit
+ Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"`
+
+ // message is the description of a specific commit
+ Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+}
+
+// ProxyConfig defines what proxies to use for an operation
+type ProxyConfig struct {
+ // httpProxy is a proxy used to reach the git repository over http
+ HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"`
+
+ // httpsProxy is a proxy used to reach the git repository over https
+ HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"`
+
+ // noProxy is the list of domains for which the proxy should not be used
+ NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"`
+}
+
+// GitBuildSource defines the parameters of a Git SCM
+type GitBuildSource struct {
+ // uri points to the source that will be built. The structure of the source
+ // will depend on the type of build to run
+ URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"`
+
+ // ref is the branch/tag/ref to build.
+ Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"`
+
+ // proxyConfig defines the proxies to use for the git clone operation. Values
+ // not set here are inherited from cluster-wide build git proxy settings.
+ ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"`
+}
+
+// SourceControlUser defines the identity of a user of source control
+type SourceControlUser struct {
+ // name of the source control user
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // email of the source control user
+ Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"`
+}
+
+// BuildStrategy contains the details of how to perform a build.
+type BuildStrategy struct {
+ // type is the kind of build strategy.
+ // +k8s:conversion-gen=false
+ // +optional
+ Type BuildStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"`
+
+ // dockerStrategy holds the parameters to the container image build strategy.
+ DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"`
+
+ // sourceStrategy holds the parameters to the Source build strategy.
+ SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
+
+ // customStrategy holds the parameters to the Custom build strategy
+ CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
+
+ // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+ // Deprecated: use OpenShift Pipelines
+ JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
+}
+
+// BuildStrategyType describes a particular way of performing a build.
+type BuildStrategyType string
+
+// Valid values for BuildStrategyType.
+const (
+ // DockerBuildStrategyType performs builds using a Dockerfile.
+ DockerBuildStrategyType BuildStrategyType = "Docker"
+
+ // SourceBuildStrategyType performs builds using Source To Images with a Git repository
+ // and a builder image.
+ SourceBuildStrategyType BuildStrategyType = "Source"
+
+ // CustomBuildStrategyType performs builds using custom builder container image.
+ CustomBuildStrategyType BuildStrategyType = "Custom"
+
+ // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
+ JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
+)
+
+// CustomBuildStrategy defines input parameters specific to Custom build.
+type CustomBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // exposeDockerSocket will allow running Docker commands (and build container images) from
+ // inside the container.
+ // TODO: Allow admins to enforce 'false' for this option
+ ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
+
+ // forcePull describes if the controller should configure the build pod to always pull the images
+ // for the builder or only pull if it is not present locally
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // secrets is a list of additional secrets that will be included in the build pod
+ Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
+
+ // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+ BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
+}
+
+// ImageOptimizationPolicy describes what optimizations the builder can perform when building images.
+type ImageOptimizationPolicy string
+
+const (
+ // ImageOptimizationNone will generate a canonical container image as produced by the
+ // `container image build` command.
+ ImageOptimizationNone ImageOptimizationPolicy = "None"
+
+ // ImageOptimizationSkipLayers is an experimental policy and will avoid creating
+ // unique layers for each dockerfile line, resulting in smaller images and saving time
+ // during creation. Some Dockerfile syntax is not fully supported - content added to
+ // a VOLUME by an earlier layer may have incorrect uid, gid, and filesystem permissions.
+ // If an unsupported setting is detected, the build will fail.
+ ImageOptimizationSkipLayers ImageOptimizationPolicy = "SkipLayers"
+
+ // ImageOptimizationSkipLayersAndWarn is the same as SkipLayers, but will only
+ // warn to the build output instead of failing when unsupported syntax is detected. This
+ // policy is experimental.
+ ImageOptimizationSkipLayersAndWarn ImageOptimizationPolicy = "SkipLayersAndWarn"
+)
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+type DockerBuildStrategy struct {
+ // from is a reference to an DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+ // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+ // this will replace the image in the last FROM directive of the file.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // noCache if set to true indicates that the container image build must be executed with the
+ // --no-cache=true flag
+ NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+ // dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+ // relative to the root of the context (contextDir).
+ // Defaults to `Dockerfile` if unset.
+ DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
+
+ // buildArgs contains build arguments that will be resolved in the Dockerfile. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details.
+ // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+ // are ignored.
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"`
+
+ // imageOptimizationPolicy describes what optimizations the system can use when building images
+ // to reduce the final size or time spent building the image. The default policy is 'None' which
+ // means the final build image will be equivalent to an image created by the container image build API.
+ // The experimental policy 'SkipLayers' will avoid committing new layers in between each
+ // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+ // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+ // 'SkipLayers' but simply warns if compatibility cannot be preserved.
+ ImageOptimizationPolicy *ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty" protobuf:"bytes,8,opt,name=imageOptimizationPolicy,casttype=ImageOptimizationPolicy"`
+
+ // volumes is a list of input volumes that can be mounted into the builds runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// SourceBuildStrategy defines input parameters specific to a Source build.
+type SourceBuildStrategy struct {
+ // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+ // the container image should be pulled
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // pullSecret is the name of a Secret that would be used for setting up
+ // the authentication for pulling the container images from the private Docker
+ // registries
+ PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // scripts is the location of Source scripts
+ Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
+
+ // incremental flag forces the Source build to do incremental builds if true.
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
+
+ // forcePull describes if the builder should pull the images from registry prior to building.
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
+
+ // deprecated json field, do not reuse: runtimeImage
+ // +k8s:protobuf-deprecated=runtimeImage,7
+
+ // deprecated json field, do not reuse: runtimeArtifacts
+ // +k8s:protobuf-deprecated=runtimeArtifacts,8
+
+ // volumes is a list of input volumes that can be mounted into the builds runtime environment.
+ // Only a subset of Kubernetes Volume sources are supported by builds.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +listType=map
+ // +listMapKey=name
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+type JenkinsPipelineBuildStrategy struct {
+ // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+ // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are
+ // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+ JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
+
+ // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+ Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
+
+ // env contains additional environment variables you want to pass into a build pipeline.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command return a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+// "postCommit": {
+// "script": "rake test --verbose",
+// }
+//
+// The above is a convenient form which is equivalent to:
+//
+// "postCommit": {
+// "command": ["/bin/sh", "-ic"],
+// "args": ["rake test --verbose"]
+// }
+//
+// 2. A command as the image entrypoint:
+//
+// "postCommit": {
+// "command": ["rake", "test", "--verbose"]
+// }
+//
+// Command overrides the image entrypoint in the exec form, as documented in
+// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+// "postCommit": {
+// "args": ["rake", "test", "--verbose"]
+// }
+//
+// This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+// "postCommit": {
+// "script": "rake test $1",
+// "args": ["--verbose"]
+// }
+//
+// This form is useful if you need to pass arguments that would otherwise be
+// hard to quote properly in the shell script. In the script, $0 will be
+// "/bin/sh" and $1, $2, etc, are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+// "postCommit": {
+// "command": ["rake", "test"],
+// "args": ["--verbose"]
+// }
+//
+// This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+type BuildPostCommitSpec struct {
+ // command is the command to run. It may not be specified with Script.
+ // This might be needed if the image doesn't have `/bin/sh`, or if you
+ // do not want to use a shell. In all other cases, using Script might be
+ // more convenient.
+ Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+ // args is a list of arguments that are provided to either Command,
+ // Script or the container image's default entrypoint. The arguments are
+ // placed immediately after the command to be run.
+ Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
+ // script is a shell script to be run with `/bin/sh -ic`. It may not be
+ // specified with Command. Use Script when a shell script is appropriate
+ // to execute the post build hook, for example for running unit tests
+ // with `rake test`. If you need control over the image entrypoint, or
+ // if the image does not have `/bin/sh`, use Command and/or Args.
+ // The `-i` flag is needed to support CentOS and RHEL images that use
+ // Software Collections (SCL), in order to have the appropriate
+ // collections enabled in the shell. E.g., in the Ruby image, this is
+ // necessary to make `ruby`, `bundle` and other binaries available in
+ // the PATH.
+ Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+type BuildOutput struct {
+ // to defines an optional location to push the output of this build to.
+ // Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+ // This value will be used to look up a container image repository to push to.
+ // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+ // the build unless Namespace is specified.
+ To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+ // PushSecret is the name of a Secret that would be used for setting
+ // up the authentication for executing the Docker push to authentication
+ // enabled Docker Registry (or Docker Hub).
+ PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+
+ // imageLabels define a list of labels that are applied to the resulting image. If there
+ // are multiple labels with the same name then the last one in the list is used.
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
+}
+
+// ImageLabel represents a label applied to the resulting image.
+type ImageLabel struct {
+ // name defines the name of the label. It must have non-zero length.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // value defines the literal value of the label.
+ Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=BuildRequest,result=Build
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec holds all the input necessary to produce a new build, and the conditions when
+ // to trigger them.
+ Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status holds any relevant information about a build config
+ // +optional
+ Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+ // triggers determine how new Builds can be launched from a BuildConfig. If
+ // no triggers are defined, a new build can only occur as a result of an
+ // explicit client build creation.
+ // +optional
+ Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"`
+
+ // RunPolicy describes how the new build created from this build
+ // configuration will be scheduled for execution.
+ // This is optional, if not specified we default to "Serial".
+ RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+ // CommonSpec is the desired build specification
+ CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+
+ // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all successful builds are retained.
+ SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"`
+
+ // failedBuildsHistoryLimit is the number of old failed builds to retain.
+ // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+ // If removed after the BuildConfig has been created, all failed builds are retained.
+ FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+ // BuildRunPolicyParallel schedules new builds immediately after they are
+ // created. Builds will be executed in parallel.
+ BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+ // BuildRunPolicySerial schedules new builds to execute in a sequence as
+ // they are created. Every build gets queued up and will execute when the
+ // previous build completes. This is the default policy.
+ BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+ // BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+ // cancelling all the previously queued builds.
+ BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+ // lastVersion is used to inform about number of last triggered build.
+ LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+
+ // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+ // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
+ // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
+ ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"`
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+type SecretLocalReference struct {
+ // Name is the name of the resource in the same namespace being referenced
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+type WebHookTrigger struct {
+ // secret used to validate requests.
+ // Deprecated: use SecretReference instead.
+ Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+
+ // allowEnv determines whether the webhook can set environment variables; can only
+ // be set to true for GenericWebHook.
+ AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
+
+ // secretReference is a reference to a secret in the same namespace,
+ // containing the value to be validated when the webhook is invoked.
+ // The secret being referenced must contain a key named "WebHookSecretKey", the value
+ // of which will be checked against the value supplied in the webhook invocation.
+ SecretReference *SecretLocalReference `json:"secretReference,omitempty" protobuf:"bytes,3,opt,name=secretReference"`
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+type ImageChangeTrigger struct {
+ // lastTriggeredImageID is used internally by the ImageChangeController to save last
+ // used image ID for build
+ // This field is deprecated and will be removed in a future release.
+ // Deprecated
+ LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+ // from is a reference to an ImageStreamTag that will trigger a build when updated
+ // It is optional. If no From is specified, the From image from the build strategy
+ // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+ // a build configuration.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+ // paused is true if this trigger is temporarily disabled. Optional.
+ Paused bool `json:"paused,omitempty" protobuf:"varint,3,opt,name=paused"`
+}
+
+// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+type ImageStreamTagReference struct {
+ // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+
+ // name is the name of the ImageStreamTag for an ImageChangeTrigger
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+}
+
+// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy
+// specified in the BuildConfigSpec.Triggers struct.
+type ImageChangeTriggerStatus struct {
+ // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started.
+ // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.
+ LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+ // from is the ImageStreamTag that is the source of the trigger.
+ From ImageStreamTagReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+ // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start.
+ // This field is only updated when this trigger specifically started a Build.
+ LastTriggerTime metav1.Time `json:"lastTriggerTime,omitempty" protobuf:"bytes,3,opt,name=lastTriggerTime"`
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+type BuildTriggerPolicy struct {
+ // type is the type of build trigger. Valid values:
+ //
+ // - GitHub
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ //
+ // - Generic
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ //
+ // - GitLab
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ //
+ // - Bitbucket
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ //
+ // - ImageChange
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ //
+ // - ConfigChange
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
+
+ // github contains the parameters for a GitHub webhook type of trigger
+ GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
+
+ // generic contains the parameters for a Generic webhook type of trigger
+ GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
+
+ // imageChange contains parameters for an ImageChange type of trigger
+ ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
+
+ // GitLabWebHook contains the parameters for a GitLab webhook type of trigger
+ GitLabWebHook *WebHookTrigger `json:"gitlab,omitempty" protobuf:"bytes,5,opt,name=gitlab"`
+
+ // BitbucketWebHook contains the parameters for a Bitbucket webhook type of
+ // trigger
+ BitbucketWebHook *WebHookTrigger `json:"bitbucket,omitempty" protobuf:"bytes,6,opt,name=bitbucket"`
+}
+
+// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
+type BuildTriggerType string
+
+const (
+ // GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitHub webhook invocations
+ GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
+ GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
+
+ // GenericWebHookBuildTriggerType represents a trigger that launches builds on
+ // generic webhook invocations
+ GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
+ GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
+
+ // GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+ // GitLab webhook invocations
+ GitLabWebHookBuildTriggerType BuildTriggerType = "GitLab"
+
+ // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+ // Bitbucket webhook invocations
+ BitbucketWebHookBuildTriggerType BuildTriggerType = "Bitbucket"
+
+ // ImageChangeBuildTriggerType represents a trigger that launches builds on
+ // availability of a new version of an image
+ ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
+ ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
+
+ // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+ // WARNING: In the future the behavior will change to trigger a build on any config change
+ ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildList is a collection of Builds.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of builds
+ Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildConfigList is a collection of BuildConfigs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of build configs
+ Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+type GenericWebHookEvent struct {
+ // type is the type of source repository
+ // +k8s:conversion-gen=false
+ Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+ // git is the git information if the Type is BuildSourceGit
+ Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ // ValueFrom is not supported.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+
+ // Refs is a list of GitRefs for the provided repo - generally sent
+ // when used from a post-receive hook. This field is optional and is
+ // used when sending multiple refs
+ Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
+}
+
+// GitRefInfo is a single ref
+type GitRefInfo struct {
+ GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+ GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLog is the (unused) resource associated with the build log redirector
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLog struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+type DockerStrategyOptions struct {
+ // Args contains any build arguments that are to be passed to Docker. See
+ // https://docs.docker.com/engine/reference/builder/#/arg for more details
+ BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,1,rep,name=buildArgs"`
+
+ // noCache overrides the docker-strategy noCache option in the build config
+ NoCache *bool `json:"noCache,omitempty" protobuf:"varint,2,opt,name=noCache"`
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+type SourceStrategyOptions struct {
+ // incremental overrides the source-strategy incremental option in the build config
+ Incremental *bool `json:"incremental,omitempty" protobuf:"varint,1,opt,name=incremental"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildRequest is the resource used to pass parameters to build generator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildRequest struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // revision is the information from the source for a specific repo snapshot.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+ // triggeredByImage is the Image that triggered this build.
+ TriggeredByImage *corev1.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+ // from is the reference to the ImageStreamTag that triggered the build.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+ // binary indicates a request to build from a binary provided to the builder
+ Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+ // lastVersion (optional) is the LastVersion of the BuildConfig that was used
+ // to generate the build. If the BuildConfig in the generator doesn't match, a build will
+ // not be generated.
+ LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+ // env contains additional environment variables you want to pass into a builder container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+ // triggeredBy describes which triggers started the most recent update to the
+ // build configuration and contains information about those triggers.
+ TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"`
+
+ // DockerStrategyOptions contains additional docker-strategy specific options for the build
+ DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
+
+ // SourceStrategyOptions contains additional source-strategy specific options for the build
+ SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BinaryBuildRequestOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+ // TODO: Improve map[string][]string conversion so we can handled nested objects
+
+ // revision.commit is the value identifying a specific commit
+ Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+ // revision.message is the description of a specific commit
+ Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+ // revision.authorName of the source control user
+ AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+ // revision.authorEmail of the source control user
+ AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+ // revision.committerName of the source control user
+ CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+ // revision.committerEmail of the source control user
+ CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLogOptions is the REST options for a build log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // cointainer for which to stream logs. Defaults to only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // follow if true indicates that the build log should be streamed until
+ // the build terminates.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // previous returns previous build logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+ // sinceTime is an RFC3339 timestamp from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified.
+ SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+ // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+ // of log output. Defaults to false.
+ Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+ // tailLines, If set, is the number of lines from the end of the logs to show. If not specified,
+ // logs are shown from the creation of the container or sinceSeconds or sinceTime
+ TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+ // limitBytes, If set, is the number of bytes to read from the server before terminating the
+ // log output. This may not display a complete final line of logging, and may return
+ // slightly more or slightly less than the specified limit.
+ LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+ // noWait if true causes the call to return immediately even if the build
+ // is not available yet. Otherwise the server will wait until the build has started.
+ // TODO: Fix the tag to 'noWait' in v2
+ NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+ // version of the build for which to view logs.
+ Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+
+ // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+ // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
+ // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+ // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+ // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+ // the actual log data coming from the real kubelet).
+ // +optional
+ InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,11,opt,name=insecureSkipTLSVerifyBackend"`
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+type SecretSpec struct {
+ // secretSource is a reference to the secret
+ SecretSource corev1.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
+
+ // mountPath is the path at which to mount the secret
+ MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+}
+
+// BuildVolume describes a volume that is made available to build pods,
+// such that it can be mounted into buildah's runtime environment.
+// Only a subset of Kubernetes Volume sources are supported.
+type BuildVolume struct {
+ // name is a unique identifier for this BuildVolume.
+ // It must conform to the Kubernetes DNS label standard and be unique within the pod.
+ // Names that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which name caused the error.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // source represents the location and type of the mounted volume.
+ // +required
+ Source BuildVolumeSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+ // mounts represents the location of the volume in the image build container
+ // +required
+ // +listType=map
+ // +listMapKey=destinationPath
+ // +patchMergeKey=destinationPath
+ // +patchStrategy=merge
+ Mounts []BuildVolumeMount `json:"mounts" patchStrategy:"merge" patchMergeKey:"destinationPath" protobuf:"bytes,3,opt,name=mounts"`
+}
+
+// BuildVolumeSourceType represents a build volume source type
+type BuildVolumeSourceType string
+
+const (
+ // BuildVolumeSourceTypeSecret is the Secret build source volume type
+ BuildVolumeSourceTypeSecret BuildVolumeSourceType = "Secret"
+
+ // BuildVolumeSourceTypeConfigmap is the ConfigMap build source volume type
+ BuildVolumeSourceTypeConfigMap BuildVolumeSourceType = "ConfigMap"
+
+ // BuildVolumeSourceTypeCSI is the CSI build source volume type
+ BuildVolumeSourceTypeCSI BuildVolumeSourceType = "CSI"
+)
+
+// BuildVolumeSource represents the source of a volume to mount
+// Only one of its supported types may be specified at any given time.
+type BuildVolumeSource struct {
+
+ // type is the BuildVolumeSourceType for the volume source.
+ // Type must match the populated volume source.
+ // Valid types are: Secret, ConfigMap
+ Type BuildVolumeSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildVolumeSourceType"`
+
+ // secret represents a Secret that should populate this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ // +optional
+ Secret *corev1.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+ // configMap represents a ConfigMap that should populate this volume
+ // +optional
+ ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+
+ // csi represents ephemeral storage provided by external CSI drivers which support this capability
+ // +optional
+ CSI *corev1.CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,4,opt,name=csi"`
+}
+
+// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.
+type BuildVolumeMount struct {
+ // destinationPath is the path within the buildah runtime environment at which the volume should be mounted.
+ // The transient mount within the build image and the backing volume will both be mounted read only.
+ // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated
+ // by the builder process
+ // Paths that collide with those added by the build controller will result in a
+ // failed build with an error message detailing which path caused the error.
+ DestinationPath string `json:"destinationPath" protobuf:"bytes,1,opt,name=destinationPath"`
+}
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..d36b28c82b
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1610 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions.
+func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(BinaryBuildRequestOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource.
+func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource {
+ if in == nil {
+ return nil
+ }
+ out := new(BinaryBuildSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) {
+ *out = *in
+ in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause.
+func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause {
+ if in == nil {
+ return nil
+ }
+ out := new(BitbucketWebHookCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+ if in == nil {
+ return nil
+ }
+ out := new(Build)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildCondition) DeepCopyInto(out *BuildCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition.
+func (in *BuildCondition) DeepCopy() *BuildCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildConfig) DeepCopyInto(out *BuildConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig.
+func (in *BuildConfig) DeepCopy() *BuildConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BuildConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList.
+func (in *BuildConfigList) DeepCopy() *BuildConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) {
+ *out = *in
+ if in.Triggers != nil {
+ in, out := &in.Triggers, &out.Triggers
+ *out = make([]BuildTriggerPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.CommonSpec.DeepCopyInto(&out.CommonSpec)
+ if in.SuccessfulBuildsHistoryLimit != nil {
+ in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FailedBuildsHistoryLimit != nil {
+ in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec.
+func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) {
+ *out = *in
+ if in.ImageChangeTriggers != nil {
+ in, out := &in.ImageChangeTriggers, &out.ImageChangeTriggers
+ *out = make([]ImageChangeTriggerStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus.
+func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildList) DeepCopyInto(out *BuildList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Build, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
+func (in *BuildList) DeepCopy() *BuildList {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildLog) DeepCopyInto(out *BuildLog) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog.
+func (in *BuildLog) DeepCopy() *BuildLog {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildLog)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildLog) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.SinceSeconds != nil {
+ in, out := &in.SinceSeconds, &out.SinceSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.SinceTime != nil {
+ in, out := &in.SinceTime, &out.SinceTime
+ *out = (*in).DeepCopy()
+ }
+ if in.TailLines != nil {
+ in, out := &in.TailLines, &out.TailLines
+ *out = new(int64)
+ **out = **in
+ }
+ if in.LimitBytes != nil {
+ in, out := &in.LimitBytes, &out.LimitBytes
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Version != nil {
+ in, out := &in.Version, &out.Version
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions.
+func (in *BuildLogOptions) DeepCopy() *BuildLogOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildLogOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildLogOptions) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOutput) DeepCopyInto(out *BuildOutput) {
+ *out = *in
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.PushSecret != nil {
+ in, out := &in.PushSecret, &out.PushSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput.
+func (in *BuildOutput) DeepCopy() *BuildOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) {
+ *out = *in
+ if in.Command != nil {
+ in, out := &in.Command, &out.Command
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Args != nil {
+ in, out := &in.Args, &out.Args
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec.
+func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildPostCommitSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildRequest) DeepCopyInto(out *BuildRequest) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Revision != nil {
+ in, out := &in.Revision, &out.Revision
+ *out = new(SourceRevision)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TriggeredByImage != nil {
+ in, out := &in.TriggeredByImage, &out.TriggeredByImage
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.Binary != nil {
+ in, out := &in.Binary, &out.Binary
+ *out = new(BinaryBuildSource)
+ **out = **in
+ }
+ if in.LastVersion != nil {
+ in, out := &in.LastVersion, &out.LastVersion
+ *out = new(int64)
+ **out = **in
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.TriggeredBy != nil {
+ in, out := &in.TriggeredBy, &out.TriggeredBy
+ *out = make([]BuildTriggerCause, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DockerStrategyOptions != nil {
+ in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions
+ *out = new(DockerStrategyOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SourceStrategyOptions != nil {
+ in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions
+ *out = new(SourceStrategyOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest.
+func (in *BuildRequest) DeepCopy() *BuildRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildRequest) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSource) DeepCopyInto(out *BuildSource) {
+ *out = *in
+ if in.Binary != nil {
+ in, out := &in.Binary, &out.Binary
+ *out = new(BinaryBuildSource)
+ **out = **in
+ }
+ if in.Dockerfile != nil {
+ in, out := &in.Dockerfile, &out.Dockerfile
+ *out = new(string)
+ **out = **in
+ }
+ if in.Git != nil {
+ in, out := &in.Git, &out.Git
+ *out = new(GitBuildSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]ImageSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SourceSecret != nil {
+ in, out := &in.SourceSecret, &out.SourceSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]SecretBuildSource, len(*in))
+ copy(*out, *in)
+ }
+ if in.ConfigMaps != nil {
+ in, out := &in.ConfigMaps, &out.ConfigMaps
+ *out = make([]ConfigMapBuildSource, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource.
+func (in *BuildSource) DeepCopy() *BuildSource {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
+ *out = *in
+ in.CommonSpec.DeepCopyInto(&out.CommonSpec)
+ if in.TriggeredBy != nil {
+ in, out := &in.TriggeredBy, &out.TriggeredBy
+ *out = make([]BuildTriggerCause, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
+func (in *BuildSpec) DeepCopy() *BuildSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildStatus) DeepCopyInto(out *BuildStatus) {
+ *out = *in
+ if in.StartTimestamp != nil {
+ in, out := &in.StartTimestamp, &out.StartTimestamp
+ *out = (*in).DeepCopy()
+ }
+ if in.CompletionTimestamp != nil {
+ in, out := &in.CompletionTimestamp, &out.CompletionTimestamp
+ *out = (*in).DeepCopy()
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ in.Output.DeepCopyInto(&out.Output)
+ if in.Stages != nil {
+ in, out := &in.Stages, &out.Stages
+ *out = make([]StageInfo, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]BuildCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus.
+func (in *BuildStatus) DeepCopy() *BuildStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) {
+ *out = *in
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = new(BuildStatusOutputTo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput.
+func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildStatusOutput)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo.
+func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildStatusOutputTo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) {
+ *out = *in
+ if in.DockerStrategy != nil {
+ in, out := &in.DockerStrategy, &out.DockerStrategy
+ *out = new(DockerBuildStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SourceStrategy != nil {
+ in, out := &in.SourceStrategy, &out.SourceStrategy
+ *out = new(SourceBuildStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CustomStrategy != nil {
+ in, out := &in.CustomStrategy, &out.CustomStrategy
+ *out = new(CustomBuildStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.JenkinsPipelineStrategy != nil {
+ in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy
+ *out = new(JenkinsPipelineBuildStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy.
+func (in *BuildStrategy) DeepCopy() *BuildStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) {
+ *out = *in
+ if in.GenericWebHook != nil {
+ in, out := &in.GenericWebHook, &out.GenericWebHook
+ *out = new(GenericWebHookCause)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitHubWebHook != nil {
+ in, out := &in.GitHubWebHook, &out.GitHubWebHook
+ *out = new(GitHubWebHookCause)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImageChangeBuild != nil {
+ in, out := &in.ImageChangeBuild, &out.ImageChangeBuild
+ *out = new(ImageChangeCause)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitLabWebHook != nil {
+ in, out := &in.GitLabWebHook, &out.GitLabWebHook
+ *out = new(GitLabWebHookCause)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BitbucketWebHook != nil {
+ in, out := &in.BitbucketWebHook, &out.BitbucketWebHook
+ *out = new(BitbucketWebHookCause)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause.
+func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildTriggerCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) {
+ *out = *in
+ if in.GitHubWebHook != nil {
+ in, out := &in.GitHubWebHook, &out.GitHubWebHook
+ *out = new(WebHookTrigger)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GenericWebHook != nil {
+ in, out := &in.GenericWebHook, &out.GenericWebHook
+ *out = new(WebHookTrigger)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImageChange != nil {
+ in, out := &in.ImageChange, &out.ImageChange
+ *out = new(ImageChangeTrigger)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitLabWebHook != nil {
+ in, out := &in.GitLabWebHook, &out.GitLabWebHook
+ *out = new(WebHookTrigger)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BitbucketWebHook != nil {
+ in, out := &in.BitbucketWebHook, &out.BitbucketWebHook
+ *out = new(WebHookTrigger)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy.
+func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildTriggerPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildVolume) DeepCopyInto(out *BuildVolume) {
+ *out = *in
+ in.Source.DeepCopyInto(&out.Source)
+ if in.Mounts != nil {
+ in, out := &in.Mounts, &out.Mounts
+ *out = make([]BuildVolumeMount, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolume.
+func (in *BuildVolume) DeepCopy() *BuildVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildVolumeMount) DeepCopyInto(out *BuildVolumeMount) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeMount.
+func (in *BuildVolumeMount) DeepCopy() *BuildVolumeMount {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildVolumeMount)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildVolumeSource) DeepCopyInto(out *BuildVolumeSource) {
+ *out = *in
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(corev1.SecretVolumeSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ConfigMap != nil {
+ in, out := &in.ConfigMap, &out.ConfigMap
+ *out = new(corev1.ConfigMapVolumeSource)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CSI != nil {
+ in, out := &in.CSI, &out.CSI
+ *out = new(corev1.CSIVolumeSource)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeSource.
+func (in *BuildVolumeSource) DeepCopy() *BuildVolumeSource {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildVolumeSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CommonSpec) DeepCopyInto(out *CommonSpec) {
+ *out = *in
+ in.Source.DeepCopyInto(&out.Source)
+ if in.Revision != nil {
+ in, out := &in.Revision, &out.Revision
+ *out = new(SourceRevision)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Strategy.DeepCopyInto(&out.Strategy)
+ in.Output.DeepCopyInto(&out.Output)
+ in.Resources.DeepCopyInto(&out.Resources)
+ in.PostCommit.DeepCopyInto(&out.PostCommit)
+ if in.CompletionDeadlineSeconds != nil {
+ in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds
+ *out = new(int64)
+ **out = **in
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(OptionalNodeSelector, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.MountTrustedCA != nil {
+ in, out := &in.MountTrustedCA, &out.MountTrustedCA
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec.
+func (in *CommonSpec) DeepCopy() *CommonSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CommonSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) {
+ *out = *in
+ if in.Revision != nil {
+ in, out := &in.Revision, &out.Revision
+ *out = new(SourceRevision)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause.
+func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause {
+ if in == nil {
+ return nil
+ }
+ out := new(CommonWebHookCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) {
+ *out = *in
+ out.ConfigMap = in.ConfigMap
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource.
+func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapBuildSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) {
+ *out = *in
+ out.From = in.From
+ if in.PullSecret != nil {
+ in, out := &in.PullSecret, &out.PullSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]SecretSpec, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy.
+func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomBuildStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) {
+ *out = *in
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.PullSecret != nil {
+ in, out := &in.PullSecret, &out.PullSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.BuildArgs != nil {
+ in, out := &in.BuildArgs, &out.BuildArgs
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ImageOptimizationPolicy != nil {
+ in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy
+ *out = new(ImageOptimizationPolicy)
+ **out = **in
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]BuildVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy.
+func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerBuildStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) {
+ *out = *in
+ if in.BuildArgs != nil {
+ in, out := &in.BuildArgs, &out.BuildArgs
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NoCache != nil {
+ in, out := &in.NoCache, &out.NoCache
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions.
+func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerStrategyOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) {
+ *out = *in
+ if in.Revision != nil {
+ in, out := &in.Revision, &out.Revision
+ *out = new(SourceRevision)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause.
+func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericWebHookCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) {
+ *out = *in
+ if in.Git != nil {
+ in, out := &in.Git, &out.Git
+ *out = new(GitInfo)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DockerStrategyOptions != nil {
+ in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions
+ *out = new(DockerStrategyOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent.
+func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericWebHookEvent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) {
+ *out = *in
+ in.ProxyConfig.DeepCopyInto(&out.ProxyConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource.
+func (in *GitBuildSource) DeepCopy() *GitBuildSource {
+ if in == nil {
+ return nil
+ }
+ out := new(GitBuildSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) {
+ *out = *in
+ if in.Revision != nil {
+ in, out := &in.Revision, &out.Revision
+ *out = new(SourceRevision)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause.
+func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause {
+ if in == nil {
+ return nil
+ }
+ out := new(GitHubWebHookCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitInfo) DeepCopyInto(out *GitInfo) {
+ *out = *in
+ in.GitBuildSource.DeepCopyInto(&out.GitBuildSource)
+ out.GitSourceRevision = in.GitSourceRevision
+ if in.Refs != nil {
+ in, out := &in.Refs, &out.Refs
+ *out = make([]GitRefInfo, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo.
+func (in *GitInfo) DeepCopy() *GitInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(GitInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) {
+ *out = *in
+ in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause.
+func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause {
+ if in == nil {
+ return nil
+ }
+ out := new(GitLabWebHookCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) {
+ *out = *in
+ in.GitBuildSource.DeepCopyInto(&out.GitBuildSource)
+ out.GitSourceRevision = in.GitSourceRevision
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo.
+func (in *GitRefInfo) DeepCopy() *GitRefInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRefInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitSourceRevision) DeepCopyInto(out *GitSourceRevision) {
+ *out = *in
+ out.Author = in.Author
+ out.Committer = in.Committer
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceRevision.
+func (in *GitSourceRevision) DeepCopy() *GitSourceRevision {
+ if in == nil {
+ return nil
+ }
+ out := new(GitSourceRevision)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageChangeCause) DeepCopyInto(out *ImageChangeCause) {
+ *out = *in
+ if in.FromRef != nil {
+ in, out := &in.FromRef, &out.FromRef
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeCause.
+func (in *ImageChangeCause) DeepCopy() *ImageChangeCause {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageChangeCause)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageChangeTrigger) DeepCopyInto(out *ImageChangeTrigger) {
+ *out = *in
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTrigger.
+func (in *ImageChangeTrigger) DeepCopy() *ImageChangeTrigger {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageChangeTrigger)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageChangeTriggerStatus) DeepCopyInto(out *ImageChangeTriggerStatus) {
+ *out = *in
+ out.From = in.From
+ in.LastTriggerTime.DeepCopyInto(&out.LastTriggerTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTriggerStatus.
+func (in *ImageChangeTriggerStatus) DeepCopy() *ImageChangeTriggerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageChangeTriggerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
+func (in *ImageLabel) DeepCopy() *ImageLabel {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLabel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSource) DeepCopyInto(out *ImageSource) {
+ *out = *in
+ out.From = in.From
+ if in.As != nil {
+ in, out := &in.As, &out.As
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Paths != nil {
+ in, out := &in.Paths, &out.Paths
+ *out = make([]ImageSourcePath, len(*in))
+ copy(*out, *in)
+ }
+ if in.PullSecret != nil {
+ in, out := &in.PullSecret, &out.PullSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource.
+func (in *ImageSource) DeepCopy() *ImageSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSourcePath) DeepCopyInto(out *ImageSourcePath) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSourcePath.
+func (in *ImageSourcePath) DeepCopy() *ImageSourcePath {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSourcePath)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamTagReference) DeepCopyInto(out *ImageStreamTagReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagReference.
+func (in *ImageStreamTagReference) DeepCopy() *ImageStreamTagReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamTagReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JenkinsPipelineBuildStrategy) DeepCopyInto(out *JenkinsPipelineBuildStrategy) {
+ *out = *in
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineBuildStrategy.
+func (in *JenkinsPipelineBuildStrategy) DeepCopy() *JenkinsPipelineBuildStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(JenkinsPipelineBuildStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in OptionalNodeSelector) DeepCopyInto(out *OptionalNodeSelector) {
+ {
+ in := &in
+ *out = make(OptionalNodeSelector, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNodeSelector.
+func (in OptionalNodeSelector) DeepCopy() OptionalNodeSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(OptionalNodeSelector)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
+ *out = *in
+ if in.HTTPProxy != nil {
+ in, out := &in.HTTPProxy, &out.HTTPProxy
+ *out = new(string)
+ **out = **in
+ }
+ if in.HTTPSProxy != nil {
+ in, out := &in.HTTPSProxy, &out.HTTPSProxy
+ *out = new(string)
+ **out = **in
+ }
+ if in.NoProxy != nil {
+ in, out := &in.NoProxy, &out.NoProxy
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
+func (in *ProxyConfig) DeepCopy() *ProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretBuildSource) DeepCopyInto(out *SecretBuildSource) {
+ *out = *in
+ out.Secret = in.Secret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBuildSource.
+func (in *SecretBuildSource) DeepCopy() *SecretBuildSource {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretBuildSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretLocalReference) DeepCopyInto(out *SecretLocalReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretLocalReference.
+func (in *SecretLocalReference) DeepCopy() *SecretLocalReference {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretLocalReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretSpec) DeepCopyInto(out *SecretSpec) {
+ *out = *in
+ out.SecretSource = in.SecretSource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec.
+func (in *SecretSpec) DeepCopy() *SecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceBuildStrategy) DeepCopyInto(out *SourceBuildStrategy) {
+ *out = *in
+ out.From = in.From
+ if in.PullSecret != nil {
+ in, out := &in.PullSecret, &out.PullSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Incremental != nil {
+ in, out := &in.Incremental, &out.Incremental
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make([]BuildVolume, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceBuildStrategy.
+func (in *SourceBuildStrategy) DeepCopy() *SourceBuildStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceBuildStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceControlUser) DeepCopyInto(out *SourceControlUser) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlUser.
+func (in *SourceControlUser) DeepCopy() *SourceControlUser {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceControlUser)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceRevision) DeepCopyInto(out *SourceRevision) {
+ *out = *in
+ if in.Git != nil {
+ in, out := &in.Git, &out.Git
+ *out = new(GitSourceRevision)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRevision.
+func (in *SourceRevision) DeepCopy() *SourceRevision {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceRevision)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceStrategyOptions) DeepCopyInto(out *SourceStrategyOptions) {
+ *out = *in
+ if in.Incremental != nil {
+ in, out := &in.Incremental, &out.Incremental
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyOptions.
+func (in *SourceStrategyOptions) DeepCopy() *SourceStrategyOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceStrategyOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StageInfo) DeepCopyInto(out *StageInfo) {
+ *out = *in
+ in.StartTime.DeepCopyInto(&out.StartTime)
+ if in.Steps != nil {
+ in, out := &in.Steps, &out.Steps
+ *out = make([]StepInfo, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInfo.
+func (in *StageInfo) DeepCopy() *StageInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(StageInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StepInfo) DeepCopyInto(out *StepInfo) {
+ *out = *in
+ in.StartTime.DeepCopyInto(&out.StartTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepInfo.
+func (in *StepInfo) DeepCopy() *StepInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(StepInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebHookTrigger) DeepCopyInto(out *WebHookTrigger) {
+ *out = *in
+ if in.SecretReference != nil {
+ in, out := &in.SecretReference, &out.SecretReference
+ *out = new(SecretLocalReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookTrigger.
+func (in *WebHookTrigger) DeepCopy() *WebHookTrigger {
+ if in == nil {
+ return nil
+ }
+ out := new(WebHookTrigger)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..72ff507b7d
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,692 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+// NOTE(review): vendored, generated by hack/update-swagger-docs.sh — do not hand-edit.
+// The "speficy" typo in map_BinaryBuildRequestOptions appears to come from the
+// upstream openshift/api type comment; fix it there and regenerate.
+var map_BinaryBuildRequestOptions = map[string]string{
+ "": "BinaryBuildRequestOptions are the options required to fully speficy a binary build request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive",
+ "revision.commit": "revision.commit is the value identifying a specific commit",
+ "revision.message": "revision.message is the description of a specific commit",
+ "revision.authorName": "revision.authorName of the source control user",
+ "revision.authorEmail": "revision.authorEmail of the source control user",
+ "revision.committerName": "revision.committerName of the source control user",
+ "revision.committerEmail": "revision.committerEmail of the source control user",
+}
+
+func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string {
+ return map_BinaryBuildRequestOptions
+}
+
+var map_BinaryBuildSource = map[string]string{
+ "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.",
+ "asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.",
+}
+
+func (BinaryBuildSource) SwaggerDoc() map[string]string {
+ return map_BinaryBuildSource
+}
+
+var map_BitbucketWebHookCause = map[string]string{
+ "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.",
+}
+
+func (BitbucketWebHookCause) SwaggerDoc() map[string]string {
+ return map_BitbucketWebHookCause
+}
+
+var map_Build = map[string]string{
+ "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is all the inputs used to execute the build.",
+ "status": "status is the current status of the build.",
+}
+
+func (Build) SwaggerDoc() map[string]string {
+ return map_Build
+}
+
+var map_BuildCondition = map[string]string{
+ "": "BuildCondition describes the state of a build at a certain point.",
+ "type": "Type of build condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (BuildCondition) SwaggerDoc() map[string]string {
+ return map_BuildCondition
+}
+
+var map_BuildConfig = map[string]string{
+ "": "Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.",
+ "status": "status holds any relevant information about a build config",
+}
+
+func (BuildConfig) SwaggerDoc() map[string]string {
+ return map_BuildConfig
+}
+
+var map_BuildConfigList = map[string]string{
+ "": "BuildConfigList is a collection of BuildConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of build configs",
+}
+
+func (BuildConfigList) SwaggerDoc() map[string]string {
+ return map_BuildConfigList
+}
+
+var map_BuildConfigSpec = map[string]string{
+ "": "BuildConfigSpec describes when and how builds are created",
+ "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.",
+ "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".",
+ "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.",
+ "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.",
+}
+
+func (BuildConfigSpec) SwaggerDoc() map[string]string {
+ return map_BuildConfigSpec
+}
+
+var map_BuildConfigStatus = map[string]string{
+ "": "BuildConfigStatus contains current state of the build config object.",
+ "lastVersion": "lastVersion is used to inform about number of last triggered build.",
+ "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.",
+}
+
+func (BuildConfigStatus) SwaggerDoc() map[string]string {
+ return map_BuildConfigStatus
+}
+
+var map_BuildList = map[string]string{
+ "": "BuildList is a collection of Builds.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of builds",
+}
+
+func (BuildList) SwaggerDoc() map[string]string {
+ return map_BuildList
+}
+
+// NOTE(review): vendored, generated by hack/update-swagger-docs.sh — do not hand-edit.
+// The "cointainer" typo in map_BuildLogOptions appears to come from the upstream
+// openshift/api field comment; fix it there and regenerate.
+var map_BuildLog = map[string]string{
+ "": "BuildLog is the (unused) resource associated with the build log redirector\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (BuildLog) SwaggerDoc() map[string]string {
+ return map_BuildLog
+}
+
+var map_BuildLogOptions = map[string]string{
+ "": "BuildLogOptions is the REST options for a build log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "container": "cointainer for which to stream logs. Defaults to only container if there is one container in the pod.",
+ "follow": "follow if true indicates that the build log should be streamed until the build terminates.",
+ "previous": "previous returns previous build logs. Defaults to false.",
+ "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+ "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+ "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+ "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.",
+ "version": "version of the build for which to view logs.",
+ "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
+}
+
+func (BuildLogOptions) SwaggerDoc() map[string]string {
+ return map_BuildLogOptions
+}
+
+var map_BuildOutput = map[string]string{
+ "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.",
+ "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.",
+ "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).",
+ "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.",
+}
+
+func (BuildOutput) SwaggerDoc() map[string]string {
+ return map_BuildOutput
+}
+
+var map_BuildPostCommitSpec = map[string]string{
+ "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command return a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test --verbose\",\n\t }\n\n\tThe above is a convenient form which is equivalent to:\n\n\t \"postCommit\": {\n\t \"command\": [\"/bin/sh\", \"-ic\"],\n\t \"args\": [\"rake test --verbose\"]\n\t }\n\n2. A command as the image entrypoint:\n\n\t \"postCommit\": {\n\t \"commit\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n\tCommand overrides the image entrypoint in the exec form, as documented in\n\tDocker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n\t \"postCommit\": {\n\t\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t\t }\n\n\t This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test $1\",\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is useful if you need to pass arguments that would otherwise be\n\thard to quote properly in the shell script. In the script, $0 will be\n\t\"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. 
Command with arguments:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\"],\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.",
+ "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.",
+ "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.",
+ "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.",
+}
+
+func (BuildPostCommitSpec) SwaggerDoc() map[string]string {
+ return map_BuildPostCommitSpec
+}
+
+var map_BuildRequest = map[string]string{
+ "": "BuildRequest is the resource used to pass parameters to build generator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "revision": "revision is the information from the source for a specific repo snapshot.",
+ "triggeredByImage": "triggeredByImage is the Image that triggered this build.",
+ "from": "from is the reference to the ImageStreamTag that triggered the build.",
+ "binary": "binary indicates a request to build from a binary provided to the builder",
+ "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.",
+ "env": "env contains additional environment variables you want to pass into a builder container.",
+ "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.",
+ "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build",
+ "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build",
+}
+
+func (BuildRequest) SwaggerDoc() map[string]string {
+ return map_BuildRequest
+}
+
+var map_BuildSource = map[string]string{
+ "": "BuildSource is the SCM used for the build.",
+ "type": "type of build input to accept",
+ "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.",
+ "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.",
+ "git": "git contains optional information about git build source",
+ "images": "images describes a set of images to be used to provide source for the build",
+ "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows to have buildable sources in directory other than root of repository.",
+ "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning private repository. The secret contains valid credentials for remote repository, where the data's key represent the authentication method to be used and value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.",
+ "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.",
+ "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.",
+}
+
+func (BuildSource) SwaggerDoc() map[string]string {
+ return map_BuildSource
+}
+
+var map_BuildSpec = map[string]string{
+ "": "BuildSpec has the information to represent a build and also additional information about a build",
+ "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.",
+}
+
+func (BuildSpec) SwaggerDoc() map[string]string {
+ return map_BuildSpec
+}
+
+// NOTE(review): vendored, generated by hack/update-swagger-docs.sh — do not hand-edit.
+// The "occured" typo in map_BuildStatus appears to come from the upstream
+// openshift/api field comment; fix it there and regenerate.
+var map_BuildStatus = map[string]string{
+ "": "BuildStatus contains the status of a build",
+ "phase": "phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".",
+ "cancelled": "cancelled describes if a cancel event was triggered for the build.",
+ "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+ "message": "message is a human-readable message indicating details about why the build has this status.",
+ "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.",
+ "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.",
+ "duration": "duration contains time.Duration object describing build time.",
+ "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.",
+ "config": "config is an ObjectReference to the BuildConfig this Build is based on.",
+ "output": "output describes the container image the build has produced.",
+ "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occured within each stage.",
+ "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.",
+ "conditions": "Conditions represents the latest available observations of a build's current state.",
+}
+
+func (BuildStatus) SwaggerDoc() map[string]string {
+ return map_BuildStatus
+}
+
+var map_BuildStatusOutput = map[string]string{
+ "": "BuildStatusOutput contains the status of the built image.",
+ "to": "to describes the status of the built image being pushed to a registry.",
+}
+
+func (BuildStatusOutput) SwaggerDoc() map[string]string {
+ return map_BuildStatusOutput
+}
+
+var map_BuildStatusOutputTo = map[string]string{
+ "": "BuildStatusOutputTo describes the status of the built image with regards to image registry to which it was supposed to be pushed.",
+ "imageDigest": "imageDigest is the digest of the built container image. The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.",
+}
+
+func (BuildStatusOutputTo) SwaggerDoc() map[string]string {
+ return map_BuildStatusOutputTo
+}
+
+var map_BuildStrategy = map[string]string{
+ "": "BuildStrategy contains the details of how to perform a build.",
+ "type": "type is the kind of build strategy.",
+ "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.",
+ "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.",
+ "customStrategy": "customStrategy holds the parameters to the Custom build strategy",
+ "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines",
+}
+
+func (BuildStrategy) SwaggerDoc() map[string]string {
+ return map_BuildStrategy
+}
+
+var map_BuildTriggerCause = map[string]string{
+ "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.",
+ "message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\",etc.",
+ "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.",
+ "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.",
+ "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.",
+ "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.",
+ "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.",
+}
+
+func (BuildTriggerCause) SwaggerDoc() map[string]string {
+ return map_BuildTriggerCause
+}
+
+var map_BuildTriggerPolicy = map[string]string{
+ "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.",
+ "type": "type is the type of build trigger. Valid values:\n\n- GitHub GitHubWebHookBuildTriggerType represents a trigger that launches builds on GitHub webhook invocations\n\n- Generic GenericWebHookBuildTriggerType represents a trigger that launches builds on generic webhook invocations\n\n- GitLab GitLabWebHookBuildTriggerType represents a trigger that launches builds on GitLab webhook invocations\n\n- Bitbucket BitbucketWebHookBuildTriggerType represents a trigger that launches builds on Bitbucket webhook invocations\n\n- ImageChange ImageChangeBuildTriggerType represents a trigger that launches builds on availability of a new version of an image\n\n- ConfigChange ConfigChangeBuildTriggerType will trigger a build on an initial build config creation WARNING: In the future the behavior will change to trigger a build on any config change",
+ "github": "github contains the parameters for a GitHub webhook type of trigger",
+ "generic": "generic contains the parameters for a Generic webhook type of trigger",
+ "imageChange": "imageChange contains parameters for an ImageChange type of trigger",
+ "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger",
+ "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger",
+}
+
+func (BuildTriggerPolicy) SwaggerDoc() map[string]string {
+ return map_BuildTriggerPolicy
+}
+
+var map_BuildVolume = map[string]string{
+ "": "BuildVolume describes a volume that is made available to build pods, such that it can be mounted into buildah's runtime environment. Only a subset of Kubernetes Volume sources are supported.",
+ "name": "name is a unique identifier for this BuildVolume. It must conform to the Kubernetes DNS label standard and be unique within the pod. Names that collide with those added by the build controller will result in a failed build with an error message detailing which name caused the error. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "source": "source represents the location and type of the mounted volume.",
+ "mounts": "mounts represents the location of the volume in the image build container",
+}
+
+func (BuildVolume) SwaggerDoc() map[string]string {
+ return map_BuildVolume
+}
+
+var map_BuildVolumeMount = map[string]string{
+ "": "BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.",
+ "destinationPath": "destinationPath is the path within the buildah runtime environment at which the volume should be mounted. The transient mount within the build image and the backing volume will both be mounted read only. Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated by the builder process Paths that collide with those added by the build controller will result in a failed build with an error message detailing which path caused the error.",
+}
+
+func (BuildVolumeMount) SwaggerDoc() map[string]string {
+ return map_BuildVolumeMount
+}
+
+var map_BuildVolumeSource = map[string]string{
+ "": "BuildVolumeSource represents the source of a volume to mount Only one of its supported types may be specified at any given time.",
+ "type": "type is the BuildVolumeSourceType for the volume source. Type must match the populated volume source. Valid types are: Secret, ConfigMap",
+ "secret": "secret represents a Secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ "configMap": "configMap represents a ConfigMap that should populate this volume",
+ "csi": "csi represents ephemeral storage provided by external CSI drivers which support this capability",
+}
+
+func (BuildVolumeSource) SwaggerDoc() map[string]string {
+ return map_BuildVolumeSource
+}
+
+// NOTE(review): vendored, generated by hack/update-swagger-docs.sh — do not hand-edit.
+// Grammar slip "an DockerImage" in map_CustomBuildStrategy appears to come from the
+// upstream openshift/api field comment; fix it there and regenerate.
+var map_CommonSpec = map[string]string{
+ "": "CommonSpec encapsulates all the inputs necessary to represent a build.",
+ "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount",
+ "source": "source describes the SCM in use.",
+ "revision": "revision is the information from the source for a specific repo snapshot. This is optional.",
+ "strategy": "strategy defines how to perform a build.",
+ "output": "output describes the container image the Strategy should produce.",
+ "resources": "resources computes resource requirements to execute the build.",
+ "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.",
+ "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.",
+ "mountTrustedCA": "mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in the cluster's proxy configuration, into the build. This lets processes within a build trust components signed by custom PKI certificate authorities, such as private artifact repositories and HTTPS proxies.\n\nWhen this field is set to true, the contents of `/etc/pki/ca-trust` within the build are managed by the build container, and any changes to this directory or its subdirectories (for example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.",
+}
+
+func (CommonSpec) SwaggerDoc() map[string]string {
+ return map_CommonSpec
+}
+
+var map_CommonWebHookCause = map[string]string{
+ "": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.",
+ "revision": "Revision is the git source revision information of the trigger.",
+ "secret": "Secret is the obfuscated webhook secret that triggered a build.",
+}
+
+func (CommonWebHookCause) SwaggerDoc() map[string]string {
+ return map_CommonWebHookCause
+}
+
+var map_ConfigMapBuildSource = map[string]string{
+ "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.",
+ "configMap": "configMap is a reference to an existing configmap that you want to use in your build.",
+ "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.",
+}
+
+func (ConfigMapBuildSource) SwaggerDoc() map[string]string {
+ return map_ConfigMapBuildSource
+}
+
+var map_CustomBuildStrategy = map[string]string{
+ "": "CustomBuildStrategy defines input parameters specific to Custom build.",
+ "from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled",
+ "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries",
+ "env": "env contains additional environment variables you want to pass into a builder container.",
+ "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build container images) from inside the container.",
+ "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally",
+ "secrets": "secrets is a list of additional secrets that will be included in the build pod",
+ "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder",
+}
+
+func (CustomBuildStrategy) SwaggerDoc() map[string]string {
+ return map_CustomBuildStrategy
+}
+
+var map_DockerBuildStrategy = map[string]string{
+ "": "DockerBuildStrategy defines input parameters specific to container image build.",
+ "from": "from is a reference to an DockerImage, ImageStreamTag, or ImageStreamImage which overrides the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds, this will replace the image in the last FROM directive of the file.",
+ "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries",
+ "noCache": "noCache if set to true indicates that the container image build must be executed with the --no-cache=true flag",
+ "env": "env contains additional environment variables you want to pass into a builder container.",
+ "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.",
+ "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir). Defaults to `Dockerfile` if unset.",
+ "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details. NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field are ignored.",
+ "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid commiting new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.",
+ "volumes": "volumes is a list of input volumes that can be mounted into the builds runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+}
+
+func (DockerBuildStrategy) SwaggerDoc() map[string]string {
+ return map_DockerBuildStrategy
+}
+
+var map_DockerStrategyOptions = map[string]string{
+ "": "DockerStrategyOptions contains extra strategy options for container image builds",
+ "buildArgs": "Args contains any build arguments that are to be passed to Docker. See https://docs.docker.com/engine/reference/builder/#/arg for more details",
+ "noCache": "noCache overrides the docker-strategy noCache option in the build config",
+}
+
+func (DockerStrategyOptions) SwaggerDoc() map[string]string {
+ return map_DockerStrategyOptions
+}
+
+var map_GenericWebHookCause = map[string]string{
+ "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.",
+ "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.",
+ "secret": "secret is the obfuscated webhook secret that triggered a build.",
+}
+
+func (GenericWebHookCause) SwaggerDoc() map[string]string {
+ return map_GenericWebHookCause
+}
+
+var map_GenericWebHookEvent = map[string]string{
+ "": "GenericWebHookEvent is the payload expected for a generic webhook post",
+ "type": "type is the type of source repository",
+ "git": "git is the git information if the Type is BuildSourceGit",
+ "env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.",
+ "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build",
+}
+
+func (GenericWebHookEvent) SwaggerDoc() map[string]string {
+ return map_GenericWebHookEvent
+}
+
+var map_GitBuildSource = map[string]string{
+ "": "GitBuildSource defines the parameters of a Git SCM",
+ "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run",
+ "ref": "ref is the branch/tag/ref to build.",
+}
+
+func (GitBuildSource) SwaggerDoc() map[string]string {
+ return map_GitBuildSource
+}
+
+var map_GitHubWebHookCause = map[string]string{
+ "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.",
+ "revision": "revision is the git revision information of the trigger.",
+ "secret": "secret is the obfuscated webhook secret that triggered a build.",
+}
+
+func (GitHubWebHookCause) SwaggerDoc() map[string]string {
+ return map_GitHubWebHookCause
+}
+
+var map_GitInfo = map[string]string{
+ "": "GitInfo is the aggregated git information for a generic webhook post",
+ "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs",
+}
+
+func (GitInfo) SwaggerDoc() map[string]string {
+ return map_GitInfo
+}
+
+var map_GitLabWebHookCause = map[string]string{
+ "": "GitLabWebHookCause has information about a GitLab webhook that triggered a build.",
+}
+
+func (GitLabWebHookCause) SwaggerDoc() map[string]string {
+ return map_GitLabWebHookCause
+}
+
+var map_GitRefInfo = map[string]string{
+ "": "GitRefInfo is a single ref",
+}
+
+func (GitRefInfo) SwaggerDoc() map[string]string {
+ return map_GitRefInfo
+}
+
+var map_GitSourceRevision = map[string]string{
+ "": "GitSourceRevision is the commit information from a git source for a build",
+ "commit": "commit is the commit hash identifying a specific commit",
+ "author": "author is the author of a specific commit",
+ "committer": "committer is the committer of a specific commit",
+ "message": "message is the description of a specific commit",
+}
+
+func (GitSourceRevision) SwaggerDoc() map[string]string {
+ return map_GitSourceRevision
+}
+
+var map_ImageChangeCause = map[string]string{
+ "": "ImageChangeCause contains information about the image that triggered a build",
+ "imageID": "imageID is the ID of the image that triggered a new build.",
+ "fromRef": "fromRef contains detailed information about an image that triggered a build.",
+}
+
+func (ImageChangeCause) SwaggerDoc() map[string]string {
+ return map_ImageChangeCause
+}
+
+var map_ImageChangeTrigger = map[string]string{
+ "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes",
+ "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build This field is deprecated and will be removed in a future release. Deprecated",
+ "from": "from is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.",
+ "paused": "paused is true if this trigger is temporarily disabled. Optional.",
+}
+
+func (ImageChangeTrigger) SwaggerDoc() map[string]string {
+ return map_ImageChangeTrigger
+}
+
+var map_ImageChangeTriggerStatus = map[string]string{
+ "": "ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy specified in the BuildConfigSpec.Triggers struct.",
+ "lastTriggeredImageID": "lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.",
+ "from": "from is the ImageStreamTag that is the source of the trigger.",
+ "lastTriggerTime": "lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. This field is only updated when this trigger specifically started a Build.",
+}
+
+func (ImageChangeTriggerStatus) SwaggerDoc() map[string]string {
+ return map_ImageChangeTriggerStatus
+}
+
+var map_ImageLabel = map[string]string{
+ "": "ImageLabel represents a label applied to the resulting image.",
+ "name": "name defines the name of the label. It must have non-zero length.",
+ "value": "value defines the literal value of the label.",
+}
+
+func (ImageLabel) SwaggerDoc() map[string]string {
+ return map_ImageLabel
+}
+
+var map_ImageSource = map[string]string{
+ "": "ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields).",
+ "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.",
+ "as": "A list of image names that this source will be used in place of during a multi-stage container image build. For instance, a Dockerfile that uses \"COPY --from=nginx:latest\" will first check for an image source that has \"nginx:latest\" in this field before attempting to pull directly. If the Dockerfile does not reference an image source it is ignored. This field and paths may both be set, in which case the contents will be used twice.",
+ "paths": "paths is a list of source and destination paths to copy from the image. This content will be copied into the build context prior to starting the build. If no paths are set, the build context will not be altered.",
+ "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.",
+}
+
+func (ImageSource) SwaggerDoc() map[string]string {
+ return map_ImageSource
+}
+
+var map_ImageSourcePath = map[string]string{
+ "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.",
+ "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. then the content of the directory will be copied, but the directory itself will not be created at the destination.",
+ "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.",
+}
+
+func (ImageSourcePath) SwaggerDoc() map[string]string {
+ return map_ImageSourcePath
+}
+
+var map_ImageStreamTagReference = map[string]string{
+ "": "ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.",
+ "namespace": "namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located",
+ "name": "name is the name of the ImageStreamTag for an ImageChangeTrigger",
+}
+
+func (ImageStreamTagReference) SwaggerDoc() map[string]string {
+ return map_ImageStreamTagReference
+}
+
+var map_JenkinsPipelineBuildStrategy = map[string]string{
+ "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines",
+ "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.",
+ "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.",
+ "env": "env contains additional environment variables you want to pass into a build pipeline.",
+}
+
+func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string {
+ return map_JenkinsPipelineBuildStrategy
+}
+
+var map_ProxyConfig = map[string]string{
+ "": "ProxyConfig defines what proxies to use for an operation",
+ "httpProxy": "httpProxy is a proxy used to reach the git repository over http",
+ "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https",
+ "noProxy": "noProxy is the list of domains for which the proxy should not be used",
+}
+
+func (ProxyConfig) SwaggerDoc() map[string]string {
+ return map_ProxyConfig
+}
+
+var map_SecretBuildSource = map[string]string{
+ "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.",
+ "secret": "secret is a reference to an existing secret that you want to use in your build.",
+ "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.",
+}
+
+func (SecretBuildSource) SwaggerDoc() map[string]string {
+ return map_SecretBuildSource
+}
+
+var map_SecretLocalReference = map[string]string{
+ "": "SecretLocalReference contains information that points to the local secret being used",
+ "name": "Name is the name of the resource in the same namespace being referenced",
+}
+
+func (SecretLocalReference) SwaggerDoc() map[string]string {
+ return map_SecretLocalReference
+}
+
+var map_SecretSpec = map[string]string{
+ "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point",
+ "secretSource": "secretSource is a reference to the secret",
+ "mountPath": "mountPath is the path at which to mount the secret",
+}
+
+func (SecretSpec) SwaggerDoc() map[string]string {
+ return map_SecretSpec
+}
+
+var map_SourceBuildStrategy = map[string]string{
+ "": "SourceBuildStrategy defines input parameters specific to an Source build.",
+ "from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled",
+ "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries",
+ "env": "env contains additional environment variables you want to pass into a builder container.",
+ "scripts": "scripts is the location of Source scripts",
+ "incremental": "incremental flag forces the Source build to do incremental builds if true.",
+ "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.",
+ "volumes": "volumes is a list of input volumes that can be mounted into the builds runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+}
+
+func (SourceBuildStrategy) SwaggerDoc() map[string]string {
+ return map_SourceBuildStrategy
+}
+
+var map_SourceControlUser = map[string]string{
+ "": "SourceControlUser defines the identity of a user of source control",
+ "name": "name of the source control user",
+ "email": "email of the source control user",
+}
+
+func (SourceControlUser) SwaggerDoc() map[string]string {
+ return map_SourceControlUser
+}
+
+var map_SourceRevision = map[string]string{
+ "": "SourceRevision is the revision or commit information from the source for the build",
+ "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'",
+ "git": "Git contains information about git-based build source",
+}
+
+func (SourceRevision) SwaggerDoc() map[string]string {
+ return map_SourceRevision
+}
+
+var map_SourceStrategyOptions = map[string]string{
+ "": "SourceStrategyOptions contains extra strategy options for Source builds",
+ "incremental": "incremental overrides the source-strategy incremental option in the build config",
+}
+
+func (SourceStrategyOptions) SwaggerDoc() map[string]string {
+ return map_SourceStrategyOptions
+}
+
+var map_StageInfo = map[string]string{
+ "": "StageInfo contains details about a build stage.",
+ "name": "name is a unique identifier for each build stage that occurs.",
+ "startTime": "startTime is a timestamp representing the server time when this Stage started. It is represented in RFC3339 form and is in UTC.",
+ "durationMilliseconds": "durationMilliseconds identifies how long the stage took to complete in milliseconds. Note: the duration of a stage can exceed the sum of the duration of the steps within the stage as not all actions are accounted for in explicit build steps.",
+ "steps": "steps contains details about each step that occurs during a build stage including start time and duration in milliseconds.",
+}
+
+func (StageInfo) SwaggerDoc() map[string]string {
+ return map_StageInfo
+}
+
+var map_StepInfo = map[string]string{
+ "": "StepInfo contains details about a build step.",
+ "name": "name is a unique identifier for each build step.",
+ "startTime": "startTime is a timestamp representing the server time when this Step started. it is represented in RFC3339 form and is in UTC.",
+ "durationMilliseconds": "durationMilliseconds identifies how long the step took to complete in milliseconds.",
+}
+
+func (StepInfo) SwaggerDoc() map[string]string {
+ return map_StepInfo
+}
+
+var map_WebHookTrigger = map[string]string{
+ "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post",
+ "secret": "secret used to validate requests. Deprecated: use SecretReference instead.",
+ "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.",
+ "secretReference": "secretReference is a reference to a secret in the same namespace, containing the value to be validated when the webhook is invoked. The secret being referenced must contain a key named \"WebHookSecretKey\", the value of which will be checked against the value supplied in the webhook invocation.",
+}
+
+func (WebHookTrigger) SwaggerDoc() map[string]string {
+ return map_WebHookTrigger
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/cloudnetwork/.codegen.yaml b/vendor/github.com/openshift/api/cloudnetwork/.codegen.yaml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/openshift/api/cloudnetwork/OWNERS b/vendor/github.com/openshift/api/cloudnetwork/OWNERS
new file mode 100644
index 0000000000..0bc20628a2
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+ - danwinship
+ - dcbw
+ - knobunc
+ - squeed
+ - abhat
diff --git a/vendor/github.com/openshift/api/cloudnetwork/install.go b/vendor/github.com/openshift/api/cloudnetwork/install.go
new file mode 100644
index 0000000000..f839ebf00b
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/install.go
@@ -0,0 +1,26 @@
+package cloudnetwork
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ cloudnetworkv1 "github.com/openshift/api/cloudnetwork/v1"
+)
+
+const (
+ GroupName = "cloud.network.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(cloudnetworkv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile b/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile
new file mode 100644
index 0000000000..ef9799eaf2
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="cloud.network.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go b/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go
new file mode 100644
index 0000000000..1d495ee24c
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go
@@ -0,0 +1,5 @@
+// Package v1 contains API Schema definitions for the cloud network v1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=cloud.network.openshift.io
+// +kubebuilder:validation:Optional
+package v1
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go
new file mode 100644
index 0000000000..9635f70d08
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go
@@ -0,0 +1,1045 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/cloudnetwork/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *CloudPrivateIPConfig) Reset() { *m = CloudPrivateIPConfig{} }
+func (*CloudPrivateIPConfig) ProtoMessage() {}
+func (*CloudPrivateIPConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_454253a7ab01c6d0, []int{0}
+}
+func (m *CloudPrivateIPConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudPrivateIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudPrivateIPConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudPrivateIPConfig.Merge(m, src)
+}
+func (m *CloudPrivateIPConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudPrivateIPConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudPrivateIPConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudPrivateIPConfig proto.InternalMessageInfo
+
+func (m *CloudPrivateIPConfigList) Reset() { *m = CloudPrivateIPConfigList{} }
+func (*CloudPrivateIPConfigList) ProtoMessage() {}
+func (*CloudPrivateIPConfigList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_454253a7ab01c6d0, []int{1}
+}
+func (m *CloudPrivateIPConfigList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudPrivateIPConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudPrivateIPConfigList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudPrivateIPConfigList.Merge(m, src)
+}
+func (m *CloudPrivateIPConfigList) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudPrivateIPConfigList) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudPrivateIPConfigList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudPrivateIPConfigList proto.InternalMessageInfo
+
+func (m *CloudPrivateIPConfigSpec) Reset() { *m = CloudPrivateIPConfigSpec{} }
+func (*CloudPrivateIPConfigSpec) ProtoMessage() {}
+func (*CloudPrivateIPConfigSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_454253a7ab01c6d0, []int{2}
+}
+func (m *CloudPrivateIPConfigSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudPrivateIPConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudPrivateIPConfigSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudPrivateIPConfigSpec.Merge(m, src)
+}
+func (m *CloudPrivateIPConfigSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudPrivateIPConfigSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudPrivateIPConfigSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudPrivateIPConfigSpec proto.InternalMessageInfo
+
+func (m *CloudPrivateIPConfigStatus) Reset() { *m = CloudPrivateIPConfigStatus{} }
+func (*CloudPrivateIPConfigStatus) ProtoMessage() {}
+func (*CloudPrivateIPConfigStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_454253a7ab01c6d0, []int{3}
+}
+func (m *CloudPrivateIPConfigStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CloudPrivateIPConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CloudPrivateIPConfigStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudPrivateIPConfigStatus.Merge(m, src)
+}
+func (m *CloudPrivateIPConfigStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *CloudPrivateIPConfigStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudPrivateIPConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudPrivateIPConfigStatus proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*CloudPrivateIPConfig)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfig")
+ proto.RegisterType((*CloudPrivateIPConfigList)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigList")
+ proto.RegisterType((*CloudPrivateIPConfigSpec)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigSpec")
+ proto.RegisterType((*CloudPrivateIPConfigStatus)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigStatus")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/cloudnetwork/v1/generated.proto", fileDescriptor_454253a7ab01c6d0)
+}
+
+var fileDescriptor_454253a7ab01c6d0 = []byte{
+ // 483 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xc1, 0x6e, 0xd3, 0x30,
+ 0x18, 0xc7, 0xe3, 0xae, 0x9b, 0x86, 0x07, 0x08, 0x45, 0x1c, 0xa2, 0x1e, 0xbc, 0xaa, 0xa7, 0x5e,
+ 0xb0, 0xe9, 0x84, 0xd0, 0x0e, 0x88, 0x43, 0xca, 0x65, 0x12, 0x8c, 0x29, 0xdc, 0x10, 0x07, 0x5c,
+ 0xc7, 0x4d, 0x4d, 0x17, 0x3b, 0x8a, 0x9d, 0x22, 0x6e, 0x3c, 0x02, 0xef, 0xc0, 0xcb, 0xf4, 0xc0,
+ 0x61, 0xc7, 0x5d, 0x98, 0x68, 0x78, 0x11, 0x64, 0x37, 0x6d, 0x23, 0xd6, 0x69, 0x91, 0x7a, 0xcb,
+ 0xf7, 0x25, 0xff, 0xff, 0xef, 0xfb, 0xfe, 0x8e, 0x0c, 0x4f, 0x13, 0x61, 0x26, 0xc5, 0x08, 0x33,
+ 0x95, 0x12, 0x95, 0x71, 0xa9, 0x27, 0x62, 0x6c, 0x08, 0xcd, 0x04, 0x61, 0x97, 0xaa, 0x88, 0x25,
+ 0x37, 0x5f, 0x55, 0x3e, 0x25, 0xb3, 0x01, 0x49, 0xb8, 0xe4, 0x39, 0x35, 0x3c, 0xc6, 0x59, 0xae,
+ 0x8c, 0xf2, 0xfb, 0x1b, 0x25, 0x5e, 0x2b, 0x31, 0xcd, 0x04, 0xae, 0x2b, 0xf1, 0x6c, 0xd0, 0x79,
+ 0x56, 0x63, 0x24, 0x2a, 0x51, 0xc4, 0x19, 0x8c, 0x8a, 0xb1, 0xab, 0x5c, 0xe1, 0x9e, 0x96, 0xc6,
+ 0x9d, 0x17, 0xd3, 0x53, 0x8d, 0x85, 0xb2, 0x43, 0xa4, 0x94, 0x4d, 0x84, 0xe4, 0xf9, 0x37, 0x92,
+ 0x4d, 0x13, 0xdb, 0xd0, 0x24, 0xe5, 0x86, 0x6e, 0x19, 0xa7, 0x43, 0xee, 0x52, 0xe5, 0x85, 0x34,
+ 0x22, 0xe5, 0xb7, 0x04, 0x2f, 0xef, 0x13, 0x68, 0x36, 0xe1, 0x29, 0xfd, 0x5f, 0xd7, 0xfb, 0xd5,
+ 0x82, 0x4f, 0x87, 0x76, 0xc3, 0x8b, 0x5c, 0xcc, 0xa8, 0xe1, 0x67, 0x17, 0x43, 0x25, 0xc7, 0x22,
+ 0xf1, 0x3f, 0xc3, 0x43, 0x3b, 0x5c, 0x4c, 0x0d, 0x0d, 0x40, 0x17, 0xf4, 0x8f, 0x4e, 0x9e, 0xe3,
+ 0x25, 0x03, 0xd7, 0x19, 0x38, 0x9b, 0x26, 0xb6, 0xa1, 0xb1, 0xfd, 0x1a, 0xcf, 0x06, 0xf8, 0xfd,
+ 0xe8, 0x0b, 0x67, 0xe6, 0x1d, 0x37, 0x34, 0xf4, 0xe7, 0x37, 0xc7, 0x5e, 0x79, 0x73, 0x0c, 0x37,
+ 0xbd, 0x68, 0xed, 0xea, 0xc7, 0xb0, 0xad, 0x33, 0xce, 0x82, 0x96, 0x73, 0x0f, 0x71, 0xd3, 0x13,
+ 0xc0, 0xdb, 0xe6, 0xfd, 0x90, 0x71, 0x16, 0x3e, 0xac, 0x78, 0x6d, 0x5b, 0x45, 0xce, 0xdd, 0xbf,
+ 0x84, 0x07, 0xda, 0x50, 0x53, 0xe8, 0x60, 0xcf, 0x71, 0xde, 0xec, 0xc8, 0x71, 0x5e, 0xe1, 0xe3,
+ 0x8a, 0x74, 0xb0, 0xac, 0xa3, 0x8a, 0xd1, 0xfb, 0x0d, 0x60, 0xb0, 0x4d, 0xf6, 0x56, 0x68, 0xe3,
+ 0x7f, 0xba, 0x15, 0x29, 0x6e, 0x16, 0xa9, 0x55, 0xbb, 0x40, 0x9f, 0x54, 0xd8, 0xc3, 0x55, 0xa7,
+ 0x16, 0x27, 0x83, 0xfb, 0xc2, 0xf0, 0x54, 0x07, 0xad, 0xee, 0x5e, 0xff, 0xe8, 0xe4, 0xf5, 0x6e,
+ 0x7b, 0x86, 0x8f, 0x2a, 0xd4, 0xfe, 0x99, 0x35, 0x8d, 0x96, 0xde, 0xbd, 0x57, 0xdb, 0xd7, 0xb3,
+ 0x79, 0xfb, 0x5d, 0xd8, 0x96, 0x2a, 0xe6, 0x6e, 0xb5, 0x07, 0x9b, 0xb3, 0x38, 0x57, 0x31, 0x8f,
+ 0xdc, 0x9b, 0xde, 0x4f, 0x00, 0x3b, 0x77, 0x87, 0x7a, 0xbf, 0x81, 0xcf, 0x20, 0x64, 0x4a, 0xc6,
+ 0xc2, 0x08, 0x25, 0x57, 0x8b, 0x92, 0x66, 0x19, 0x0e, 0x57, 0xba, 0xcd, 0x5f, 0xb9, 0x6e, 0xe9,
+ 0xa8, 0x66, 0x1b, 0x9e, 0xcf, 0x17, 0xc8, 0xbb, 0x5a, 0x20, 0xef, 0x7a, 0x81, 0xbc, 0xef, 0x25,
+ 0x02, 0xf3, 0x12, 0x81, 0xab, 0x12, 0x81, 0xeb, 0x12, 0x81, 0x3f, 0x25, 0x02, 0x3f, 0xfe, 0x22,
+ 0xef, 0x63, 0xbf, 0xe9, 0x55, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xf0, 0xc5, 0x6e, 0x95,
+ 0x04, 0x00, 0x00,
+}
+
+func (m *CloudPrivateIPConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudPrivateIPConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudPrivateIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudPrivateIPConfigList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudPrivateIPConfigList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudPrivateIPConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudPrivateIPConfigSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudPrivateIPConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudPrivateIPConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Node)
+ copy(dAtA[i:], m.Node)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Node)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CloudPrivateIPConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CloudPrivateIPConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CloudPrivateIPConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Node)
+ copy(dAtA[i:], m.Node)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Node)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CloudPrivateIPConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudPrivateIPConfigList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CloudPrivateIPConfigSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Node)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CloudPrivateIPConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Node)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CloudPrivateIPConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudPrivateIPConfig{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CloudPrivateIPConfigSpec", "CloudPrivateIPConfigSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CloudPrivateIPConfigStatus", "CloudPrivateIPConfigStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudPrivateIPConfigList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]CloudPrivateIPConfig{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CloudPrivateIPConfig", "CloudPrivateIPConfig", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&CloudPrivateIPConfigList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudPrivateIPConfigSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CloudPrivateIPConfigSpec{`,
+ `Node:` + fmt.Sprintf("%v", this.Node) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CloudPrivateIPConfigStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&CloudPrivateIPConfigStatus{`,
+ `Node:` + fmt.Sprintf("%v", this.Node) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *CloudPrivateIPConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudPrivateIPConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudPrivateIPConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudPrivateIPConfigList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CloudPrivateIPConfig{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudPrivateIPConfigSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Node = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CloudPrivateIPConfigStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CloudPrivateIPConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Node = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
new file mode 100644
index 0000000000..dc4557883f
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
@@ -0,0 +1,89 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.cloudnetwork.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/cloudnetwork/v1";
+
+// CloudPrivateIPConfig performs an assignment of a private IP address to the
+// primary NIC associated with cloud VMs. This is done by specifying the IP and
+// Kubernetes node which the IP should be assigned to. This CRD is intended to
+// be used by the network plugin which manages the cluster network. The spec
+// side represents the desired state requested by the network plugin, and the
+// status side represents the current state that this CRD's controller has
+// executed. No users will have permission to modify it, and if a cluster-admin
+// decides to edit it for some reason, their changes will be overwritten the
+// next time the network plugin reconciles the object. Note: the CR's name
+// must specify the requested private IP address (can be IPv4 or IPv6).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=cloudprivateipconfigs,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/859
+// +openshift:file-pattern=operatorOrdering=001
+// +openshift:compatibility-gen:level=1
+message CloudPrivateIPConfig {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the definition of the desired private IP request.
+ // +kubebuilder:validation:Required
+ // +required
+ optional CloudPrivateIPConfigSpec spec = 2;
+
+ // status is the observed status of the desired private IP request. Read-only.
+ // +kubebuilder:validation:Optional
+ // +optional
+ optional CloudPrivateIPConfigStatus status = 3;
+}
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +resource:path=cloudprivateipconfig
+// CloudPrivateIPConfigList is the list of CloudPrivateIPConfigList.
+// +openshift:compatibility-gen:level=1
+message CloudPrivateIPConfigList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of CloudPrivateIPConfig.
+ repeated CloudPrivateIPConfig items = 2;
+}
+
+// CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to.
+// +k8s:openapi-gen=true
+message CloudPrivateIPConfigSpec {
+ // node is the node name, as specified by the Kubernetes field: node.metadata.name
+ // +kubebuilder:validation:Optional
+ // +optional
+ optional string node = 1;
+}
+
+// CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition.
+// +k8s:openapi-gen=true
+message CloudPrivateIPConfigStatus {
+ // node is the node name, as specified by the Kubernetes field: node.metadata.name
+ // +kubebuilder:validation:Optional
+ // +optional
+ optional string node = 1;
+
+ // condition is the assignment condition of the private IP and its status
+ // +kubebuilder:validation:Required
+ // +required
+ repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/register.go b/vendor/github.com/openshift/api/cloudnetwork/v1/register.go
new file mode 100644
index 0000000000..734101c8e5
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/register.go
@@ -0,0 +1,37 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "cloud.network.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CloudPrivateIPConfig{},
+ &CloudPrivateIPConfigList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go
new file mode 100644
index 0000000000..4c19e44c3f
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go
@@ -0,0 +1,93 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CloudPrivateIPConfig performs an assignment of a private IP address to the
+// primary NIC associated with cloud VMs. This is done by specifying the IP and
+// Kubernetes node which the IP should be assigned to. This CRD is intended to
+// be used by the network plugin which manages the cluster network. The spec
+// side represents the desired state requested by the network plugin, and the
+// status side represents the current state that this CRD's controller has
+// executed. No users will have permission to modify it, and if a cluster-admin
+// decides to edit it for some reason, their changes will be overwritten the
+// next time the network plugin reconciles the object. Note: the CR's name
+// must specify the requested private IP address (can be IPv4 or IPv6).
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=cloudprivateipconfigs,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/859
+// +openshift:file-pattern=operatorOrdering=001
+// +openshift:compatibility-gen:level=1
+type CloudPrivateIPConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // spec is the definition of the desired private IP request.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec CloudPrivateIPConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status is the observed status of the desired private IP request. Read-only.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Status CloudPrivateIPConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to.
+// +k8s:openapi-gen=true
+type CloudPrivateIPConfigSpec struct {
+ // node is the node name, as specified by the Kubernetes field: node.metadata.name
+ // +kubebuilder:validation:Optional
+ // +optional
+ Node string `json:"node" protobuf:"bytes,1,opt,name=node"`
+}
+
+// CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition.
+// +k8s:openapi-gen=true
+type CloudPrivateIPConfigStatus struct {
+ // node is the node name, as specified by the Kubernetes field: node.metadata.name
+ // +kubebuilder:validation:Optional
+ // +optional
+ Node string `json:"node" protobuf:"bytes,1,opt,name=node"`
+ // condition is the assignment condition of the private IP and its status
+ // +kubebuilder:validation:Required
+ // +required
+ Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// CloudPrivateIPConfigConditionType specifies the current condition type of the CloudPrivateIPConfig
+type CloudPrivateIPConfigConditionType string
+
+const (
+ // Assigned is the condition type of the cloud private IP request.
+ // It is paired with the following ConditionStatus:
+ // - True - in the case of a successful assignment
+ // - False - in the case of a failed assignment
+ // - Unknown - in the case of a pending assignment
+ Assigned CloudPrivateIPConfigConditionType = "Assigned"
+)
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +resource:path=cloudprivateipconfig
+// CloudPrivateIPConfigList is the list of CloudPrivateIPConfigList.
+// +openshift:compatibility-gen:level=1
+type CloudPrivateIPConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // List of CloudPrivateIPConfig.
+ Items []CloudPrivateIPConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..092825f352
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go
@@ -0,0 +1,111 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudPrivateIPConfig) DeepCopyInto(out *CloudPrivateIPConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfig.
+func (in *CloudPrivateIPConfig) DeepCopy() *CloudPrivateIPConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudPrivateIPConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudPrivateIPConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudPrivateIPConfigList) DeepCopyInto(out *CloudPrivateIPConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CloudPrivateIPConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigList.
+func (in *CloudPrivateIPConfigList) DeepCopy() *CloudPrivateIPConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudPrivateIPConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudPrivateIPConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudPrivateIPConfigSpec) DeepCopyInto(out *CloudPrivateIPConfigSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigSpec.
+func (in *CloudPrivateIPConfigSpec) DeepCopy() *CloudPrivateIPConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudPrivateIPConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudPrivateIPConfigStatus) DeepCopyInto(out *CloudPrivateIPConfigStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigStatus.
+func (in *CloudPrivateIPConfigStatus) DeepCopy() *CloudPrivateIPConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudPrivateIPConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..18b16994d9
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,21 @@
+cloudprivateipconfigs.cloud.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/859
+ CRDName: cloudprivateipconfigs.cloud.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "001"
+ FilenameRunLevel: ""
+ GroupName: cloud.network.openshift.io
+ HasStatus: true
+ KindName: CloudPrivateIPConfig
+ Labels: {}
+ PluralName: cloudprivateipconfigs
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..6a2f659ca2
--- /dev/null
+++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,54 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_CloudPrivateIPConfig = map[string]string{
+ "": "CloudPrivateIPConfig performs an assignment of a private IP address to the primary NIC associated with cloud VMs. This is done by specifying the IP and Kubernetes node which the IP should be assigned to. This CRD is intended to be used by the network plugin which manages the cluster network. The spec side represents the desired state requested by the network plugin, and the status side represents the current state that this CRD's controller has executed. No users will have permission to modify it, and if a cluster-admin decides to edit it for some reason, their changes will be overwritten the next time the network plugin reconciles the object. Note: the CR's name must specify the requested private IP address (can be IPv4 or IPv6).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the definition of the desired private IP request.",
+ "status": "status is the observed status of the desired private IP request. Read-only.",
+}
+
+func (CloudPrivateIPConfig) SwaggerDoc() map[string]string {
+ return map_CloudPrivateIPConfig
+}
+
+var map_CloudPrivateIPConfigList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). CloudPrivateIPConfigList is the list of CloudPrivateIPConfigList.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "List of CloudPrivateIPConfig.",
+}
+
+func (CloudPrivateIPConfigList) SwaggerDoc() map[string]string {
+ return map_CloudPrivateIPConfigList
+}
+
+var map_CloudPrivateIPConfigSpec = map[string]string{
+ "": "CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to.",
+ "node": "node is the node name, as specified by the Kubernetes field: node.metadata.name",
+}
+
+func (CloudPrivateIPConfigSpec) SwaggerDoc() map[string]string {
+ return map_CloudPrivateIPConfigSpec
+}
+
+var map_CloudPrivateIPConfigStatus = map[string]string{
+ "": "CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition.",
+ "node": "node is the node name, as specified by the Kubernetes field: node.metadata.name",
+ "conditions": "condition is the assignment condition of the private IP and its status",
+}
+
+func (CloudPrivateIPConfigStatus) SwaggerDoc() map[string]string {
+ return map_CloudPrivateIPConfigStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/config/.codegen.yaml b/vendor/github.com/openshift/api/config/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/config/install.go b/vendor/github.com/openshift/api/config/install.go
new file mode 100644
index 0000000000..1c3c677477
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/install.go
@@ -0,0 +1,27 @@
+package config
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ configv1 "github.com/openshift/api/config/v1"
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+const (
+ GroupName = "config.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(configv1.Install, configv1alpha1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/config/v1/Makefile b/vendor/github.com/openshift/api/config/v1/Makefile
new file mode 100644
index 0000000000..66bf636305
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go
new file mode 100644
index 0000000000..f994547583
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/doc.go
@@ -0,0 +1,9 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+// +openshift:featuregated-schema-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go
new file mode 100644
index 0000000000..61302592ea
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/register.go
@@ -0,0 +1,78 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &APIServer{},
+ &APIServerList{},
+ &Authentication{},
+ &AuthenticationList{},
+ &Build{},
+ &BuildList{},
+ &ClusterOperator{},
+ &ClusterOperatorList{},
+ &ClusterVersion{},
+ &ClusterVersionList{},
+ &Console{},
+ &ConsoleList{},
+ &DNS{},
+ &DNSList{},
+ &FeatureGate{},
+ &FeatureGateList{},
+ &Image{},
+ &ImageList{},
+ &Infrastructure{},
+ &InfrastructureList{},
+ &Ingress{},
+ &IngressList{},
+ &Node{},
+ &NodeList{},
+ &Network{},
+ &NetworkList{},
+ &OAuth{},
+ &OAuthList{},
+ &OperatorHub{},
+ &OperatorHubList{},
+ &Project{},
+ &ProjectList{},
+ &Proxy{},
+ &ProxyList{},
+ &Scheduler{},
+ &SchedulerList{},
+ &ImageContentPolicy{},
+ &ImageContentPolicyList{},
+ &ImageDigestMirrorSet{},
+ &ImageDigestMirrorSetList{},
+ &ImageTagMirrorSet{},
+ &ImageTagMirrorSetList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go
new file mode 100644
index 0000000000..6a5718c1db
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/stringsource.go
@@ -0,0 +1,31 @@
+package v1
+
+import "encoding/json"
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+// If the value is a string, it sets the Value field of the StringSource.
+// Otherwise, it is unmarshaled into the StringSourceSpec struct
+func (s *StringSource) UnmarshalJSON(value []byte) error {
+ // If we can unmarshal to a simple string, just set the value
+ var simpleValue string
+ if err := json.Unmarshal(value, &simpleValue); err == nil {
+ s.Value = simpleValue
+ return nil
+ }
+
+ // Otherwise do the full struct unmarshal
+ return json.Unmarshal(value, &s.StringSourceSpec)
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
+// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
+func (s *StringSource) MarshalJSON() ([]byte, error) {
+ // If we have only a cleartext value set, do a simple string marshal
+ if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
+ return json.Marshal(s.Value)
+ }
+
+ // Otherwise do the full struct marshal of the externalized bits
+ return json.Marshal(s.StringSourceSpec)
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
new file mode 100644
index 0000000000..6fb1b9adc9
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -0,0 +1,430 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ConfigMapFileReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapFileReference struct {
+ Name string `json:"name"`
+ // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
+ Key string `json:"key,omitempty"`
+}
+
+// ConfigMapNameReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapNameReference struct {
+ // name is the metadata.name of the referenced config map
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
+
+// SecretNameReference references a secret in a specific namespace.
+// The namespace must be specified at the point of use.
+type SecretNameReference struct {
+ // name is the metadata.name of the referenced secret
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
+
+// HTTPServingInfo holds configuration for serving HTTP
+type HTTPServingInfo struct {
+ // ServingInfo is the HTTP serving information
+ ServingInfo `json:",inline"`
+ // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
+ MaxRequestsInFlight int64 `json:"maxRequestsInFlight"`
+ // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if
+ // -1 there is no limit on requests.
+ RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"`
+}
+
+// ServingInfo holds information about serving web pages
+type ServingInfo struct {
+ // BindAddress is the ip:port to serve on
+ BindAddress string `json:"bindAddress"`
+ // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
+ // "tcp4", and "tcp6"
+ BindNetwork string `json:"bindNetwork"`
+ // CertInfo is the TLS cert info for serving secure traffic.
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+ // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
+ // +optional
+ ClientCA string `json:"clientCA,omitempty"`
+ // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
+ NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"`
+ // MinTLSVersion is the minimum TLS version supported.
+ // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
+ MinTLSVersion string `json:"minTLSVersion,omitempty"`
+ // CipherSuites contains an overridden list of ciphers for the server to support.
+ // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
+ CipherSuites []string `json:"cipherSuites,omitempty"`
+}
+
+// CertInfo relates a certificate with a private key
+type CertInfo struct {
+ // CertFile is a file containing a PEM-encoded certificate
+ CertFile string `json:"certFile"`
+ // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
+ KeyFile string `json:"keyFile"`
+}
+
+// NamedCertificate specifies a certificate/key, and the names it should be served for
+type NamedCertificate struct {
+ // Names is a list of DNS names this certificate should be used to secure
+ // A name can be a normal DNS name, or can contain leading wildcard segments.
+ Names []string `json:"names,omitempty"`
+ // CertInfo is the TLS cert info for serving secure traffic
+ CertInfo `json:",inline"`
+}
+
+// LeaderElection provides information to elect a leader
+type LeaderElection struct {
+ // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case.
+ Disable bool `json:"disable,omitempty"`
+ // namespace indicates which namespace the resource is in
+ Namespace string `json:"namespace,omitempty"`
+ // name indicates what name to use for the resource
+ Name string `json:"name,omitempty"`
+
+ // leaseDuration is the duration that non-leader candidates will wait
+ // after observing a leadership renewal until attempting to acquire
+ // leadership of a led but unrenewed leader slot. This is effectively the
+ // maximum duration that a leader can be stopped before it is replaced
+ // by another candidate. This is only applicable if leader election is
+ // enabled.
+ // +nullable
+ LeaseDuration metav1.Duration `json:"leaseDuration"`
+ // renewDeadline is the interval between attempts by the acting master to
+ // renew a leadership slot before it stops leading. This must be less
+ // than or equal to the lease duration. This is only applicable if leader
+ // election is enabled.
+ // +nullable
+ RenewDeadline metav1.Duration `json:"renewDeadline"`
+ // retryPeriod is the duration the clients should wait between attempting
+ // acquisition and renewal of a leadership. This is only applicable if
+ // leader election is enabled.
+ // +nullable
+ RetryPeriod metav1.Duration `json:"retryPeriod"`
+}
+
+// StringSource allows specifying a string inline, or externally via env var or file.
+// When it contains only a string value, it marshals to a simple JSON string.
+type StringSource struct {
+ // StringSourceSpec specifies the string value, or external location
+ StringSourceSpec `json:",inline"`
+}
+
+// StringSourceSpec specifies a string value, or external location
+type StringSourceSpec struct {
+ // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
+ Value string `json:"value"`
+
+ // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
+ Env string `json:"env"`
+
+ // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
+ File string `json:"file"`
+
+ // KeyFile references a file containing the key to use to decrypt the value.
+ KeyFile string `json:"keyFile"`
+}
+
+// RemoteConnectionInfo holds information necessary for establishing a remote connection
+type RemoteConnectionInfo struct {
+ // URL is the remote URL to connect to
+ URL string `json:"url"`
+ // CA is the CA for verifying TLS connections
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information to present
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+type AdmissionConfig struct {
+ PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"`
+
+ // enabledPlugins is a list of admission plugins that must be on in addition to the default list.
+ // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon
+ // and can result in performance penalties and unexpected behavior.
+ EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"`
+
+ // disabledPlugins is a list of admission plugins that must be off. Putting something in this list
+ // is almost always a mistake and likely to result in cluster instability.
+ DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
+}
+
+// AdmissionPluginConfig holds the necessary configuration options for admission plugins
+type AdmissionPluginConfig struct {
+ // Location is the path to a configuration file that contains the plugin's
+ // configuration
+ Location string `json:"location"`
+
+ // Configuration is an embedded configuration object to be used as the plugin's
+ // configuration. If present, it will be used instead of the path to the configuration file.
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Configuration runtime.RawExtension `json:"configuration"`
+}
+
+type LogFormatType string
+
+type WebHookModeType string
+
+const (
+ // LogFormatLegacy saves event in 1-line text format.
+ LogFormatLegacy LogFormatType = "legacy"
+ // LogFormatJson saves event in structured json format.
+ LogFormatJson LogFormatType = "json"
+
+ // WebHookModeBatch indicates that the webhook should buffer audit events
+ // internally, sending batch updates either once a certain number of
+ // events have been received or a certain amount of time has passed.
+ WebHookModeBatch WebHookModeType = "batch"
+ // WebHookModeBlocking causes the webhook to block on every attempt to process
+ // a set of events. This causes requests to the API server to wait for a
+ // round trip to the external audit service before sending a response.
+ WebHookModeBlocking WebHookModeType = "blocking"
+)
+
+// AuditConfig holds configuration for the audit capabilities
+type AuditConfig struct {
+ // If this flag is set, audit log will be printed in the logs.
+ // The logs contains, method, user and a requested URL.
+ Enabled bool `json:"enabled"`
+ // All requests coming to the apiserver will be logged to this file.
+ AuditFilePath string `json:"auditFilePath"`
+ // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
+ MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
+ // Maximum number of old log files to retain.
+ MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
+ // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
+ MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
+
+ // PolicyFile is a path to the file that defines the audit policy configuration.
+ PolicyFile string `json:"policyFile"`
+ // PolicyConfiguration is an embedded policy configuration object to be used
+ // as the audit policy configuration. If present, it will be used instead of
+ // the path to the policy file.
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
+
+ // Format of saved audits (legacy or json).
+ LogFormat LogFormatType `json:"logFormat"`
+
+ // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
+ WebHookKubeConfig string `json:"webHookKubeConfig"`
+ // Strategy for sending audit events (block or batch).
+ WebHookMode WebHookModeType `json:"webHookMode"`
+}
+
+// EtcdConnectionInfo holds information necessary for connecting to an etcd server
+type EtcdConnectionInfo struct {
+ // URLs are the URLs for etcd
+ URLs []string `json:"urls,omitempty"`
+ // CA is a file containing trusted roots for the etcd server certificates
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to etcd
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+type EtcdStorageConfig struct {
+ EtcdConnectionInfo `json:",inline"`
+
+ // StoragePrefix is the path within etcd that the OpenShift resources will
+ // be rooted under. This value, if changed, will mean existing objects in etcd will
+ // no longer be located.
+ StoragePrefix string `json:"storagePrefix"`
+}
+
+// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
+type GenericAPIServerConfig struct {
+ // servingInfo describes how to start serving
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // corsAllowedOrigins
+ CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
+
+ // auditConfig describes how to configure audit information
+ AuditConfig AuditConfig `json:"auditConfig"`
+
+ // storageConfig contains information about how to use
+ StorageConfig EtcdStorageConfig `json:"storageConfig"`
+
+ // admissionConfig holds information about how to configure admission.
+ AdmissionConfig AdmissionConfig `json:"admission"`
+
+ KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
+}
+
+type KubeClientConfig struct {
+ // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config
+ KubeConfig string `json:"kubeConfig"`
+
+ // connectionOverrides specifies client overrides for system components to loop back to this master.
+ ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
+}
+
+type ClientConnectionOverrides struct {
+ // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
+ // default value of 'application/json'. This field will control all connections to the server used by a particular
+ // client.
+ AcceptContentTypes string `json:"acceptContentTypes"`
+ // contentType is the content type used when sending data to the server from this client.
+ ContentType string `json:"contentType"`
+
+ // qps controls the number of queries per second allowed for this connection.
+ QPS float32 `json:"qps"`
+ // burst allows extra queries to accumulate when a client is exceeding its rate.
+ Burst int32 `json:"burst"`
+}
+
+// GenericControllerConfig provides information to configure a controller
+type GenericControllerConfig struct {
+ // ServingInfo is the HTTP serving information for the controller's endpoints
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // leaderElection provides information to elect a leader. Only override this if you have a specific need
+ LeaderElection LeaderElection `json:"leaderElection"`
+
+ // authentication allows configuration of authentication for the endpoints
+ Authentication DelegatedAuthentication `json:"authentication"`
+ // authorization allows configuration of authentication for the endpoints
+ Authorization DelegatedAuthorization `json:"authorization"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+ // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+ // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
+ Disabled bool `json:"disabled,omitempty"`
+}
+type RequiredHSTSPolicy struct {
+ // namespaceSelector specifies a label selector such that the policy applies only to those routes that
+ // are in namespaces with labels that match the selector, and are in one of the DomainPatterns.
+ // Defaults to the empty LabelSelector, which matches everything.
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // domainPatterns is a list of domains for which the desired HSTS annotations are required.
+ // If domainPatterns is specified and a route is created with a spec.host matching one of the domains,
+ // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.
+ //
+ // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com.
+ // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:Required
+ // +required
+ DomainPatterns []string `json:"domainPatterns"`
+
+ // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts.
+ // If set to 0, it negates the effect, and hosts are removed as HSTS hosts.
+ // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts.
+ // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS
+ // policy will eventually expire on that client.
+ MaxAge MaxAgePolicy `json:"maxAge"`
+
+ // preloadPolicy directs the client to include hosts in its host preload list so that
+ // it never needs to do an initial load to get the HSTS header (note that this is not defined
+ // in RFC 6797 and is therefore client implementation-dependent).
+ // +optional
+ PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"`
+
+ // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's
+ // domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains:
+ // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com
+ // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com
+ // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com
+ // +optional
+ IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"`
+}
+
+// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy
+type MaxAgePolicy struct {
+ // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age
+ // This value can be left unspecified, in which case no upper limit is enforced.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=2147483647
+ LargestMaxAge *int32 `json:"largestMaxAge,omitempty"`
+
+ // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age
+ // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary
+ // tool for administrators to quickly correct mistakes.
+ // This value can be left unspecified, in which case no lower limit is enforced.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=2147483647
+ SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"`
+}
+
+// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy
+// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion
+type PreloadPolicy string
+
+const (
+ // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy
+ RequirePreloadPolicy PreloadPolicy = "RequirePreload"
+
+ // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy
+ RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload"
+
+ // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy
+ NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion"
+)
+
+// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy
+// for the enclosing RequiredHSTSPolicy
+// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion
+type IncludeSubDomainsPolicy string
+
+const (
+ // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy
+ RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains"
+
+ // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy
+ RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains"
+
+ // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy
+ NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion"
+)
+
+// IBMCloudServiceName contains a value specifying the name of an IBM Cloud Service,
+// which are used by MAPI, CIRO, CIO, Installer, etc.
+// +kubebuilder:validation:Enum=CIS;COS;DNSServices;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;ResourceController;ResourceManager;VPC
+type IBMCloudServiceName string
+
+const (
+ // IBMCloudServiceCIS is the name for IBM Cloud CIS.
+ IBMCloudServiceCIS IBMCloudServiceName = "CIS"
+ // IBMCloudServiceCOS is the name for IBM Cloud COS.
+ IBMCloudServiceCOS IBMCloudServiceName = "COS"
+ // IBMCloudServiceDNSServices is the name for IBM Cloud DNS Services.
+ IBMCloudServiceDNSServices IBMCloudServiceName = "DNSServices"
+ // IBMCloudServiceGlobalSearch is the name for IBM Cloud Global Search.
+ IBMCloudServiceGlobalSearch IBMCloudServiceName = "GlobalSearch"
+ // IBMCloudServiceGlobalTagging is the name for IBM Cloud Global Tagging.
+ IBMCloudServiceGlobalTagging IBMCloudServiceName = "GlobalTagging"
+ // IBMCloudServiceHyperProtect is the name for IBM Cloud Hyper Protect.
+ IBMCloudServiceHyperProtect IBMCloudServiceName = "HyperProtect"
+ // IBMCloudServiceIAM is the name for IBM Cloud IAM.
+ IBMCloudServiceIAM IBMCloudServiceName = "IAM"
+ // IBMCloudServiceKeyProtect is the name for IBM Cloud Key Protect.
+ IBMCloudServiceKeyProtect IBMCloudServiceName = "KeyProtect"
+ // IBMCloudServiceResourceController is the name for IBM Cloud Resource Controller.
+ IBMCloudServiceResourceController IBMCloudServiceName = "ResourceController"
+ // IBMCloudServiceResourceManager is the name for IBM Cloud Resource Manager.
+ IBMCloudServiceResourceManager IBMCloudServiceName = "ResourceManager"
+ // IBMCloudServiceVPC is the name for IBM Cloud VPC.
+ IBMCloudServiceVPC IBMCloudServiceName = "VPC"
+)
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
new file mode 100644
index 0000000000..bdae466892
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -0,0 +1,226 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIServer holds configuration (like serving certificates, client CA and CORS domains)
+// shared by all API servers in the system, among them especially kube-apiserver
+// and openshift-apiserver. The canonical name of an instance is 'cluster'.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=apiservers,scope=Cluster
+// +kubebuilder:subresource:status
+type APIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec APIServerSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status APIServerStatus `json:"status"`
+}
+
+type APIServerSpec struct {
+ // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates
+ // will be used for serving secure traffic.
+ // +optional
+ ServingCerts APIServerServingCerts `json:"servingCerts"`
+ // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for
+ // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid.
+ // You usually only have to set this if you have your own PKI you wish to honor client certificates from.
+ // The ConfigMap must exist in the openshift-config namespace and contain the following required fields:
+ // - ConfigMap.Data["ca-bundle.crt"] - CA bundle.
+ // +optional
+ ClientCA ConfigMapNameReference `json:"clientCA"`
+ // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the
+ // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth
+ // server from JavaScript applications.
+ // The values are regular expressions that correspond to the Golang regular expression language.
+ // +optional
+ AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+ // encryption allows the configuration of encryption of resources at the datastore layer.
+ // +optional
+ Encryption APIServerEncryption `json:"encryption"`
+ // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.
+ //
+ // If unset, a default (which may change between releases) is chosen. Note that only Old,
+ // Intermediate and Custom profiles are currently supported, and the maximum available
+ // minTLSVersion is VersionTLS12.
+ // +optional
+ TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
+ // audit specifies the settings for audit configuration to be applied to all OpenShift-provided
+ // API servers in the cluster.
+ // +optional
+ // +kubebuilder:default={profile: Default}
+ Audit Audit `json:"audit"`
+}
+
+// AuditProfileType defines the audit policy profile type.
+// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None
+type AuditProfileType string
+
+const (
+ // "None" disables audit logs.
+ NoneAuditProfileType AuditProfileType = "None"
+
+ // "Default" is the existing default audit configuration policy.
+ DefaultAuditProfileType AuditProfileType = "Default"
+
+ // "WriteRequestBodies" is similar to Default but it logs request and response
+ // HTTP payloads for write requests (create, update, patch)
+ WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies"
+
+ // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request
+ // and response HTTP payloads for read requests (get, list).
+ AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies"
+)
+
+type Audit struct {
+ // profile specifies the name of the desired top-level audit profile to be applied to all requests
+ // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver,
+ // openshift-apiserver and oauth-apiserver), with the exception of those requests that match
+ // one or more of the customRules.
+ //
+ // The following profiles are provided:
+ // - Default: default policy which means MetaData level logging with the exception of events
+ // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody
+ // level).
+ // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+ // write requests (create, update, patch).
+ // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+ // HTTP payloads for read requests (get, list).
+ // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.
+ //
+ // Warning: It is not recommended to disable audit logging by using the `None` profile unless you
+ // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues.
+ // If you disable audit logging and a support situation arises, you might need to enable audit logging
+ // and reproduce the issue in order to troubleshoot properly.
+ //
+ // If unset, the 'Default' profile is used as the default.
+ //
+ // +kubebuilder:default=Default
+ Profile AuditProfileType `json:"profile,omitempty"`
+ // customRules specify profiles per group. These profiles take precedence over the
+ // top-level profile field if they apply. They are evaluated from top to bottom and
+ // the first one that matches, applies.
+ // +listType=map
+ // +listMapKey=group
+ // +optional
+ CustomRules []AuditCustomRule `json:"customRules,omitempty"`
+}
+
+// AuditCustomRule describes a custom rule for an audit profile that takes precedence over
+// the top-level profile.
+type AuditCustomRule struct {
+ // group is the name of a group that a request user must be a member of for this profile to apply.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ Group string `json:"group"`
+ // profile specifies the name of the desired audit policy configuration to be deployed to
+ // all OpenShift-provided API servers in the cluster.
+ //
+ // The following profiles are provided:
+ // - Default: the existing default policy.
+ // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+ // write requests (create, update, patch).
+ // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+ // HTTP payloads for read requests (get, list).
+ // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.
+ //
+ // If unset, the 'Default' profile is used as the default.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Profile AuditProfileType `json:"profile,omitempty"`
+}
+
+type APIServerServingCerts struct {
+ // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
+ // If no named certificates are provided, or no named certificates match the server name as understood by a client,
+ // the defaultServingCertificate will be used.
+ // +optional
+ NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
+}
+
+// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
+type APIServerNamedServingCert struct {
+ // names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
+ // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
+ // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.
+ // +optional
+ Names []string `json:"names,omitempty"`
+ // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
+ // The secret must exist in the openshift-config namespace and contain the following required fields:
+ // - Secret.Data["tls.key"] - TLS private key.
+ // - Secret.Data["tls.crt"] - TLS certificate.
+ ServingCertificate SecretNameReference `json:"servingCertificate"`
+}
+
+type APIServerEncryption struct {
+ // type defines what encryption type should be used to encrypt resources at the datastore layer.
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied.
+ // The behavior of unset can and will change over time. Even if encryption is enabled by default,
+ // the meaning of unset may change to a different encryption type based on changes in best practices.
+ //
+ // When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
+ // This list of sensitive resources can and will change over time. The current authoritative list is:
+ //
+ // 1. secrets
+ // 2. configmaps
+ // 3. routes.route.openshift.io
+ // 4. oauthaccesstokens.oauth.openshift.io
+ // 5. oauthauthorizetokens.oauth.openshift.io
+ //
+ // +unionDiscriminator
+ // +optional
+ Type EncryptionType `json:"type,omitempty"`
+}
+
+// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm
+type EncryptionType string
+
+const (
+ // identity refers to a type where no encryption is performed at the datastore layer.
+ // Resources are written as-is without encryption.
+ EncryptionTypeIdentity EncryptionType = "identity"
+
+ // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESCBC EncryptionType = "aescbc"
+
+ // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key
+ // is used to perform encryption at the datastore layer.
+ EncryptionTypeAESGCM EncryptionType = "aesgcm"
+)
+
+type APIServerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type APIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []APIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go
new file mode 100644
index 0000000000..b3dfa61b51
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go
@@ -0,0 +1,482 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients"
+
+// Authentication specifies cluster-wide settings for authentication (like OAuth and
+// webhook token authenticators). The canonical name of an instance is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=authentications,scope=Cluster
+// +kubebuilder:subresource:status
+type Authentication struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec AuthenticationSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status AuthenticationStatus `json:"status"`
+}
+
+type AuthenticationSpec struct {
+ // type identifies the cluster managed, user facing authentication mode in use.
+ // Specifically, it manages the component that responds to login attempts.
+ // The default is IntegratedOAuth.
+ // +optional
+ Type AuthenticationType `json:"type"`
+
+ // oauthMetadata contains the discovery endpoint data for OAuth 2.0
+ // Authorization Server Metadata for an external OAuth server.
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // If oauthMetadata.name is non-empty, this value has precedence
+ // over any metadata reference stored in status.
+ // The key "oauthMetadata" is used to locate the data.
+ // If specified and the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"`
+
+ // webhookTokenAuthenticators is DEPRECATED, setting it has no effect.
+ // +listType=atomic
+ WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"`
+
+ // webhookTokenAuthenticator configures a remote token reviewer.
+ // These remote authentication webhooks can be used to verify bearer tokens
+ // via the tokenreviews.authentication.k8s.io REST API. This is required to
+ // honor bearer tokens that are provisioned by an external authentication service.
+ //
+ // Can only be set if "Type" is set to "None".
+ //
+ // +optional
+ WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"`
+
+ // serviceAccountIssuer is the identifier of the bound service account token
+ // issuer.
+ // The default is https://kubernetes.default.svc
+ // WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the
+ // previous issuer value. Instead, the tokens issued by previous service account issuer will continue to
+ // be trusted for a time period chosen by the platform (currently set to 24h).
+ // This time period is subject to change over time.
+ // This allows internal components to transition to use the new service account issuer without service disruption.
+ // +optional
+ ServiceAccountIssuer string `json:"serviceAccountIssuer"`
+
+ // OIDCProviders are OIDC identity providers that can issue tokens
+ // for this cluster
+ // Can only be set if "Type" is set to "OIDC".
+ //
+ // At most one provider can be configured.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=1
+ // +openshift:enable:FeatureGate=ExternalOIDC
+ OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"`
+}
+
+type AuthenticationStatus struct {
+ // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0
+ // Authorization Server Metadata for the in-cluster integrated OAuth server.
+ // This discovery document can be viewed from its served location:
+ // oc get --raw '/.well-known/oauth-authorization-server'
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // This contains the observed value based on cluster state.
+ // An explicitly set value in spec.oauthMetadata has precedence over this field.
+ // This field has no meaning if authentication spec.type is not set to IntegratedOAuth.
+ // The key "oauthMetadata" is used to locate the data.
+ // If the config map or expected key is not found, no metadata is served.
+ // If the specified metadata is not valid, no metadata is served.
+ // The namespace for this config map is openshift-config-managed.
+ IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"`
+
+ // OIDCClients is where participating operators place the current OIDC client status
+ // for OIDC clients that can be customized by the cluster-admin.
+ //
+ // +listType=map
+ // +listMapKey=componentNamespace
+ // +listMapKey=componentName
+ // +kubebuilder:validation:MaxItems=20
+ // +openshift:enable:FeatureGate=ExternalOIDC
+ OIDCClients []OIDCClientStatus `json:"oidcClients"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AuthenticationList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Authentication `json:"items"`
+}
+
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth
+// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC
+type AuthenticationType string
+
+const (
+ // None means that no cluster managed authentication system is in place.
+ // Note that user login will only work if a manually configured system is in place and
+ // referenced in authentication spec via oauthMetadata and
+ // webhookTokenAuthenticator/oidcProviders
+ AuthenticationTypeNone AuthenticationType = "None"
+
+ // IntegratedOAuth refers to the cluster managed OAuth server.
+ // It is configured via the top level OAuth config.
+ AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth"
+
+ // AuthenticationTypeOIDC refers to a configuration with an external
+ // OIDC server configured directly with the kube-apiserver.
+ AuthenticationTypeOIDC AuthenticationType = "OIDC"
+)
+
+// DeprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator.
+// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.
+type DeprecatedWebhookTokenAuthenticator struct {
+ // kubeConfig contains kube config file data which describes how to access the remote webhook service.
+ // For further details, see:
+ // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ // The key "kubeConfig" is used to locate the data.
+ // If the secret or expected key is not found, the webhook is not honored.
+ // If the specified kube config data is not valid, the webhook is not honored.
+ // The namespace for this secret is determined by the point of use.
+ KubeConfig SecretNameReference `json:"kubeConfig"`
+}
+
+// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator
+type WebhookTokenAuthenticator struct {
+ // kubeConfig references a secret that contains kube config file data which
+ // describes how to access the remote webhook service.
+ // The namespace for the referenced secret is openshift-config.
+ //
+ // For further details, see:
+ //
+ // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+ //
+ // The key "kubeConfig" is used to locate the data.
+ // If the secret or expected key is not found, the webhook is not honored.
+ // If the specified kube config data is not valid, the webhook is not honored.
+ // +kubebuilder:validation:Required
+ // +required
+ KubeConfig SecretNameReference `json:"kubeConfig"`
+}
+
+const (
+ // OAuthMetadataKey is the key for the oauth authorization server metadata
+ OAuthMetadataKey = "oauthMetadata"
+
+ // KubeConfigKey is the key for the kube config file data in a secret
+ KubeConfigKey = "kubeConfig"
+)
+
+type OIDCProvider struct {
+ // Name of the OIDC provider
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+ // Issuer describes attributes of the OIDC token issuer
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Issuer TokenIssuer `json:"issuer"`
+
+ // OIDCClients contains configuration for the platform's clients that
+ // need to request tokens from the issuer
+ //
+ // +listType=map
+ // +listMapKey=componentNamespace
+ // +listMapKey=componentName
+ // +kubebuilder:validation:MaxItems=20
+ OIDCClients []OIDCClientConfig `json:"oidcClients"`
+
+ // ClaimMappings describes rules on how to transform information from an
+ // ID token into a cluster identity
+ ClaimMappings TokenClaimMappings `json:"claimMappings"`
+
+ // ClaimValidationRules are rules that are applied to validate token claims to authenticate users.
+ //
+ // +listType=atomic
+ ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"`
+}
+
+// +kubebuilder:validation:MinLength=1
+type TokenAudience string
+
+type TokenIssuer struct {
+ // URL is the serving URL of the token issuer.
+ // Must use the https:// scheme.
+ //
+ // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]`
+ // +kubebuilder:validation:Required
+ // +required
+ URL string `json:"issuerURL"`
+
+ // Audiences is an array of audiences that the token was issued for.
+ // Valid tokens must include at least one of these values in their
+ // "aud" claim.
+ // Must be set to exactly one value.
+ //
+ // +listType=set
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ // +required
+ Audiences []TokenAudience `json:"audiences"`
+
+ // CertificateAuthority is a reference to a config map in the
+ // configuration namespace. The .data of the configMap must contain
+ // the "ca-bundle.crt" key.
+ // If unset, system trust is used instead.
+ CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"`
+}
+
+type TokenClaimMappings struct {
+ // Username is a name of the claim that should be used to construct
+ // usernames for the cluster identity.
+ //
+ // Default value: "sub"
+ Username UsernameClaimMapping `json:"username,omitempty"`
+
+ // Groups is a name of the claim that should be used to construct
+ // groups for the cluster identity.
+ // The referenced claim must use array of strings values.
+ Groups PrefixedClaimMapping `json:"groups,omitempty"`
+}
+
+type TokenClaimMapping struct {
+ // Claim is a JWT token claim to be used in the mapping
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Claim string `json:"claim"`
+}
+
+type OIDCClientConfig struct {
+ // ComponentName is the name of the component that is supposed to consume this
+ // client configuration
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentName string `json:"componentName"`
+
+ // ComponentNamespace is the namespace of the component that is supposed to consume this
+ // client configuration
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentNamespace string `json:"componentNamespace"`
+
+ // ClientID is the identifier of the OIDC client from the OIDC provider
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ ClientID string `json:"clientID"`
+
+ // ClientSecret refers to a secret in the `openshift-config` namespace that
+ // contains the client secret in the `clientSecret` key of the `.data` field
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // ExtraScopes is an optional set of scopes to request tokens with.
+ //
+ // +listType=set
+ ExtraScopes []string `json:"extraScopes"`
+}
+
+type OIDCClientStatus struct {
+ // ComponentName is the name of the component that will consume a client configuration.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentName string `json:"componentName"`
+
+ // ComponentNamespace is the namespace of the component that will consume a client configuration.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Required
+ // +required
+ ComponentNamespace string `json:"componentNamespace"`
+
+ // CurrentOIDCClients is a list of clients that the component is currently using.
+ //
+ // +listType=map
+ // +listMapKey=issuerURL
+ // +listMapKey=clientID
+ CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"`
+
+ // ConsumingUsers is a slice of ServiceAccounts that need to have read
+ // permission on the `clientSecret` secret.
+ //
+ // +kubebuilder:validation:MaxItems=5
+ // +listType=set
+ ConsumingUsers []ConsumingUser `json:"consumingUsers"`
+
+ // Conditions are used to communicate the state of the `oidcClients` entry.
+ //
+ // Supported conditions include Available, Degraded and Progressing.
+ //
+ // If Available is true, the component is successfully using the configured client.
+ // If Degraded is true, that means something has gone wrong trying to handle the client configuration.
+ // If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.
+ //
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+type OIDCClientReference struct {
+ // OIDCName refers to the `name` of the provider from `oidcProviders`
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ OIDCProviderName string `json:"oidcProviderName"`
+
+ // URL is the serving URL of the token issuer.
+ // Must use the https:// scheme.
+ //
+ // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]`
+ // +kubebuilder:validation:Required
+ // +required
+ IssuerURL string `json:"issuerURL"`
+
+ // ClientID is the identifier of the OIDC client from the OIDC provider
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ ClientID string `json:"clientID"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise"
+type UsernameClaimMapping struct {
+ TokenClaimMapping `json:",inline"`
+
+ // PrefixPolicy specifies how a prefix should apply.
+ //
+ // By default, claims other than `email` will be prefixed with the issuer URL to
+ // prevent naming clashes with other plugins.
+ //
+ // Set to "NoPrefix" to disable prefixing.
+ //
+ // Example:
+ // (1) `prefix` is set to "myoidc:" and `claim` is set to "username".
+ // If the JWT claim `username` contains value `userA`, the resulting
+ // mapped value will be "myoidc:userA".
+ // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the
+ // JWT `email` claim contains value "userA@myoidc.tld", the resulting
+ // mapped value will be "myoidc:userA@myoidc.tld".
+ // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+ // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+ // and `claim` is set to:
+ // (a) "username": the mapped value will be "https://myoidc.tld#userA"
+ // (b) "email": the mapped value will be "userA@myoidc.tld"
+ //
+ // +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"}
+ PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"`
+
+ Prefix *UsernamePrefix `json:"prefix"`
+}
+
+type UsernamePrefixPolicy string
+
+var (
+ // NoOpinion lets the cluster assign prefixes. If the username claim is email, there is no prefix
+ // If the username claim is anything else, it is prefixed by the issuerURL
+ NoOpinion UsernamePrefixPolicy = ""
+
+ // NoPrefix means the username claim value will not have any prefix
+ NoPrefix UsernamePrefixPolicy = "NoPrefix"
+
+ // Prefix means the prefix value must be specified. It cannot be empty
+ Prefix UsernamePrefixPolicy = "Prefix"
+)
+
+type UsernamePrefix struct {
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ PrefixString string `json:"prefixString"`
+}
+
+type PrefixedClaimMapping struct {
+ TokenClaimMapping `json:",inline"`
+
+ // Prefix is a string to prefix the value from the token in the result of the
+ // claim mapping.
+ //
+ // By default, no prefixing occurs.
+ //
+ // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains
+ // an array of strings "a", "b" and "c", the mapping will result in an
+ // array of string "myoidc:a", "myoidc:b" and "myoidc:c".
+ Prefix string `json:"prefix"`
+}
+
+type TokenValidationRuleType string
+
+const (
+ TokenValidationRuleTypeRequiredClaim = "RequiredClaim"
+)
+
+type TokenClaimValidationRule struct {
+ // Type sets the type of the validation rule
+ //
+ // +kubebuilder:validation:Enum={"RequiredClaim"}
+ // +kubebuilder:default="RequiredClaim"
+ Type TokenValidationRuleType `json:"type"`
+
+ // RequiredClaim allows configuring a required claim name and its expected
+ // value
+ RequiredClaim *TokenRequiredClaim `json:"requiredClaim"`
+}
+
+type TokenRequiredClaim struct {
+ // Claim is a name of a required claim. Only claims with string values are
+ // supported.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ Claim string `json:"claim"`
+
+ // RequiredValue is the required value for the claim.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ // +required
+ RequiredValue string `json:"requiredValue"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
new file mode 100644
index 0000000000..dad47666db
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -0,0 +1,133 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configures the behavior of OpenShift builds for the entire cluster.
+// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
+//
+// The canonical name is "cluster"
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=openshift-controller-manager,operatorOrdering=01
+// +openshift:capability=Build
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=builds,scope=Cluster
+// +kubebuilder:subresource:status
+type Build struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec holds user-settable values for the build controller configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec BuildSpec `json:"spec"`
+}
+
+type BuildSpec struct {
+ // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted for image pushes and pulls during builds.
+ // The namespace for this config map is openshift-config.
+ //
+ // DEPRECATED: Additional CAs for image pull and push should be set on
+ // image.config.openshift.io/cluster instead.
+ //
+ // +optional
+ AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+ // BuildDefaults controls the default information for Builds
+ // +optional
+ BuildDefaults BuildDefaults `json:"buildDefaults"`
+ // BuildOverrides controls override settings for builds
+ // +optional
+ BuildOverrides BuildOverrides `json:"buildOverrides"`
+}
+
+type BuildDefaults struct {
+ // DefaultProxy contains the default proxy settings for all build operations, including image pull/push
+ // and source download.
+ //
+ // Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
+ // in the build config's strategy.
+ // +optional
+ DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
+
+ // GitProxy contains the proxy settings for git operations only. If set, this will override
+ // any Proxy settings for all git commands, such as git clone.
+ //
+ // Values that are not set here will be inherited from DefaultProxy.
+ // +optional
+ GitProxy *ProxySpec `json:"gitProxy,omitempty"`
+
+ // Env is a set of default environment variables that will be applied to the
+ // build if the specified variables do not exist on the build
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
+
+ // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // User can override a default label by providing a label with the same name in their
+ // Build/BuildConfig.
+ // +optional
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+ // Resources defines resource requirements to execute the build.
+ // +optional
+ Resources corev1.ResourceRequirements `json:"resources"`
+}
+
+type ImageLabel struct {
+ // Name defines the name of the label. It must have non-zero length.
+ Name string `json:"name"`
+
+ // Value defines the literal value of the label.
+ // +optional
+ Value string `json:"value,omitempty"`
+}
+
+type BuildOverrides struct {
+ // ImageLabels is a list of docker labels that are applied to the resulting image.
+ // If user provided a label in their Build/BuildConfig with the same name as one in this
+ // list, the user's label will be overwritten.
+ // +optional
+ ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+ // NodeSelector is a selector which must be true for the build pod to fit on a node
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // Tolerations is a list of Tolerations that will override any existing
+ // tolerations set on a build pod.
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+ // ForcePull overrides, if set, the equivalent value in the builds,
+ // i.e. false disables force pull for all builds,
+ // true enables force pull for all builds,
+ // independently of what each build specifies itself
+ // +optional
+ ForcePull *bool `json:"forcePull,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Build `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
new file mode 100644
index 0000000000..7951762ccd
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -0,0 +1,227 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOperator is the Custom Resource object which holds the current state
+// of an operator. This object is used by operators to convey their state to
+// the rest of the cluster.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusteroperators,scope=Cluster,shortName=co
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.versions[?(@.name=="operator")].version,type=string,description=The version the operator is at.
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string,description=Whether the operator is running and stable.
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string,description=Whether the operator is processing changes.
+// +kubebuilder:printcolumn:name=Degraded,JSONPath=.status.conditions[?(@.type=="Degraded")].status,type=string,description=Whether the operator is degraded.
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Available")].lastTransitionTime,type=date,description=The time the operator's Available status last changed.
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+type ClusterOperator struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds configuration that could apply to any operator.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ClusterOperatorSpec `json:"spec"`
+
+ // status holds the information about the state of an operator. It is consistent with status information across
+ // the Kubernetes ecosystem.
+ // +optional
+ Status ClusterOperatorStatus `json:"status"`
+}
+
+// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
+type ClusterOperatorSpec struct {
+}
+
+// ClusterOperatorStatus provides information about the status of the operator.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatus struct {
+ // conditions describes the state of the operator's managed and monitored components.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
+ // operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
+ // An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
+ // +optional
+ Versions []OperandVersion `json:"versions,omitempty"`
+
+ // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
+ // 1. the detailed resource driving the operator
+ // 2. operator namespaces
+ // 3. operand namespaces
+ // +optional
+ RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
+
+ // extension contains any additional status information specific to the
+ // operator which owns this status object.
+ // +nullable
+ // +optional
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Extension runtime.RawExtension `json:"extension"`
+}
+
+type OperandVersion struct {
+ // name is the name of the particular operand this version is for. It usually matches container images, not operators.
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+
+ // version indicates which version of a particular operand is currently being managed. It must always match the Available
+ // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout
+ // 1.1.0
+ // +kubebuilder:validation:Required
+ // +required
+ Version string `json:"version"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+ // group of the referent.
+ // +kubebuilder:validation:Required
+ // +required
+ Group string `json:"group"`
+ // resource of the referent.
+ // +kubebuilder:validation:Required
+ // +required
+ Resource string `json:"resource"`
+ // namespace of the referent.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+ // name of the referent.
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ClusterOperatorStatusCondition represents the state of the operator's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatusCondition struct {
+ // type specifies the aspect reported by this condition.
+ // +kubebuilder:validation:Required
+ // +required
+ Type ClusterStatusConditionType `json:"type"`
+
+ // status of the condition, one of True, False, Unknown.
+ // +kubebuilder:validation:Required
+ // +required
+ Status ConditionStatus `json:"status"`
+
+ // lastTransitionTime is the time of the last update to the current status property.
+ // +kubebuilder:validation:Required
+ // +required
+ LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+
+ // reason is the CamelCase reason for the condition's current status.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // message provides additional information about the current condition.
+ // This is only to be consumed by humans. It may contain Line Feed
+ // characters (U+000A), which should be rendered as new lines.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// ClusterStatusConditionType is an aspect of operator state.
+type ClusterStatusConditionType string
+
+const (
+ // Available indicates that the component (operator and all configured operands)
+ // is functional and available in the cluster. Available=False means at least
+ // part of the component is non-functional, and that the condition requires
+ // immediate administrator intervention.
+ OperatorAvailable ClusterStatusConditionType = "Available"
+
+ // Progressing indicates that the component (operator and all configured operands)
+ // is actively rolling out new code, propagating config changes, or otherwise
+ // moving from one steady state to another. Operators should not report
+ // progressing when they are reconciling (without action) a previously known
+ // state. If the observed cluster state has changed and the component is
+ // reacting to it (scaling up for instance), Progressing should become true
+ // since it is moving from one steady state to another.
+ OperatorProgressing ClusterStatusConditionType = "Progressing"
+
+ // Degraded indicates that the component (operator and all configured operands)
+ // does not match its desired state over a period of time resulting in a lower
+ // quality of service. The period of time may vary by component, but a Degraded
+ // state represents persistent observation of a condition. As a result, a
+ // component should not oscillate in and out of Degraded state. A component may
+ // be Available even if its degraded. For example, a component may desire 3
+ // running pods, but 1 pod is crash-looping. The component is Available but
+ // Degraded because it may have a lower quality of service. A component may be
+ // Progressing but not Degraded because the transition from one state to
+ // another does not persist over a long enough period to report Degraded. A
+ // component should not report Degraded during the course of a normal upgrade.
+ // A component may report Degraded in response to a persistent infrastructure
+ // failure that requires eventual administrator intervention. For example, if
+ // a control plane host is unhealthy and must be replaced. A component should
+ // report Degraded if unexpected errors occur over a period, but the
+ // expectation is that all unexpected errors are handled as operators mature.
+ OperatorDegraded ClusterStatusConditionType = "Degraded"
+
+ // Upgradeable indicates whether the component (operator and all configured
+ // operands) is safe to upgrade based on the current cluster state. When
+ // Upgradeable is False, the cluster-version operator will prevent the
+ // cluster from performing impacted updates unless forced. When set on
+ // ClusterVersion, the message will explain which updates (minor or patch)
+ // are impacted. When set on ClusterOperator, False will block minor
+ // OpenShift updates. The message field should contain a human readable
+ // description of what the administrator should do to allow the cluster or
+ // component to successfully update. The cluster-version operator will
+ // allow updates when this condition is not False, including when it is
+ // missing, True, or Unknown.
+ OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
+
+ // EvaluationConditionsDetected is used to indicate the result of the detection
+ // logic that was added to a component to evaluate the introduction of an
+ // invasive change that could potentially result in highly visible alerts,
+ // breakages or upgrade failures. You can concatenate multiple Reason using
+ // the "::" delimiter if you need to evaluate the introduction of multiple changes.
+ EvaluationConditionsDetected ClusterStatusConditionType = "EvaluationConditionsDetected"
+)
+
+// ClusterOperatorList is a list of OperatorStatus resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterOperatorList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterOperator `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
new file mode 100644
index 0000000000..2b8c302134
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -0,0 +1,868 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
+// parameters related to automatic updates can be set.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/495
+// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=clusterversions,scope=Cluster
+// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'baremetal' in self.spec.capabilities.additionalEnabledCapabilities ? 'MachineAPI' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'MachineAPI' in self.status.capabilities.enabledCapabilities) : true",message="the `baremetal` capability requires the `MachineAPI` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `MachineAPI` capability"
+// +kubebuilder:validation:XValidation:rule="has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) && self.spec.capabilities.baselineCapabilitySet == 'None' && 'marketplace' in self.spec.capabilities.additionalEnabledCapabilities ? 'OperatorLifecycleManager' in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) && 'OperatorLifecycleManager' in self.status.capabilities.enabledCapabilities) : true",message="the `marketplace` capability requires the `OperatorLifecycleManager` capability, which is neither explicitly or implicitly enabled in this cluster, please enable the `OperatorLifecycleManager` capability"
+// +kubebuilder:printcolumn:name=Version,JSONPath=.status.history[?(@.state=="Completed")].version,type=string
+// +kubebuilder:printcolumn:name=Available,JSONPath=.status.conditions[?(@.type=="Available")].status,type=string
+// +kubebuilder:printcolumn:name=Progressing,JSONPath=.status.conditions[?(@.type=="Progressing")].status,type=string
+// +kubebuilder:printcolumn:name=Since,JSONPath=.status.conditions[?(@.type=="Progressing")].lastTransitionTime,type=date
+// +kubebuilder:printcolumn:name=Status,JSONPath=.status.conditions[?(@.type=="Progressing")].message,type=string
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+type ClusterVersion struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the desired state of the cluster version - the operator will work
+ // to ensure that the desired version is applied to the cluster.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ClusterVersionSpec `json:"spec"`
+ // status contains information about the available updates and any in-progress
+ // updates.
+ // +optional
+ Status ClusterVersionStatus `json:"status"`
+}
+
+// ClusterVersionSpec is the desired version state of the cluster. It includes
+// the version the cluster should be at, how the cluster is identified, and
+// where the cluster should look for version updates.
+// +k8s:deepcopy-gen=true
+type ClusterVersionSpec struct {
+ // clusterID uniquely identifies this cluster. This is expected to be
+ // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+ // hexadecimal values). This is a required field.
+ // +kubebuilder:validation:Required
+ // +required
+ ClusterID ClusterID `json:"clusterID"`
+
+ // desiredUpdate is an optional field that indicates the desired value of
+ // the cluster version. Setting this value will trigger an upgrade (if
+ // the current version does not match the desired version). The set of
+ // recommended update values is listed as part of available updates in
+ // status, and setting values outside that range may cause the upgrade
+ // to fail.
+ //
+ // Some of the fields are inter-related with restrictions and meanings described here.
+ // 1. image is specified, version is specified, architecture is specified. API validation error.
+ // 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used.
+ // 3. image is specified, version is not specified, architecture is specified. API validation error.
+ // 4. image is specified, version is not specified, architecture is not specified. image is used.
+ // 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image.
+ // 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image.
+ // 7. image is not specified, version is not specified, architecture is specified. API validation error.
+ // 8. image is not specified, version is not specified, architecture is not specified. API validation error.
+ //
+ // If an upgrade fails the operator will halt and report status
+ // about the failing component. Setting the desired update value back to
+ // the previous version will cause a rollback to be attempted. Not all
+ // rollbacks will succeed.
+ //
+ // +optional
+ DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
+
+ // upstream may be used to specify the preferred update server. By default
+ // it will use the appropriate update server for the cluster and region.
+ //
+ // +optional
+ Upstream URL `json:"upstream,omitempty"`
+ // channel is an identifier for explicitly requesting that a non-default
+ // set of updates be applied to this cluster. The default channel will
+ // contain stable updates that are appropriate for production clusters.
+ //
+ // +optional
+ Channel string `json:"channel,omitempty"`
+
+ // capabilities configures the installation of optional, core
+ // cluster components. A null value here is identical to an
+ // empty object; see the child properties for default semantics.
+ // +optional
+ Capabilities *ClusterVersionCapabilitiesSpec `json:"capabilities,omitempty"`
+
+ // signatureStores contains the upstream URIs to verify release signatures and optional
+ // reference to a config map by name containing the PEM-encoded CA bundle.
+ //
+ // By default, CVO will use existing signature stores if this property is empty.
+ // The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature
+ // in these stores in parallel only when local ConfigMaps did not include a valid signature.
+ // Validation will fail if none of the signature stores reply with valid signature before timeout.
+ // Setting signatureStores will replace the default signature stores with custom signature stores.
+ // Default stores can be used with custom signature stores by adding them manually.
+ //
+ // A maximum of 32 signature stores may be configured.
+ // +kubebuilder:validation:MaxItems=32
+ // +openshift:enable:FeatureGate=SignatureStores
+ // +listType=map
+ // +listMapKey=url
+ // +optional
+ SignatureStores []SignatureStore `json:"signatureStores"`
+
+ // overrides is a list of overrides for components that are managed by
+ // cluster version operator. Marking a component unmanaged will prevent
+ // the operator from creating or updating the object.
+ // +listType=map
+ // +listMapKey=kind
+ // +listMapKey=group
+ // +listMapKey=namespace
+ // +listMapKey=name
+ // +optional
+ Overrides []ComponentOverride `json:"overrides,omitempty"`
+}
+
+// ClusterVersionStatus reports the status of the cluster versioning,
+// including any upgrades that are in progress. The current field will
+// be set to whichever version the cluster is reconciling to, and the
+// conditions array will report whether the update succeeded, is in
+// progress, or is failing.
+// +k8s:deepcopy-gen=true
+type ClusterVersionStatus struct {
+ // desired is the version that the cluster is reconciling towards.
+ // If the cluster is not yet fully initialized desired will be set
+ // with the information available, which may be an image or a tag.
+ // +kubebuilder:validation:Required
+ // +required
+ Desired Release `json:"desired"`
+
+ // history contains a list of the most recent versions applied to the cluster.
+ // This value may be empty during cluster startup, and then will be updated
+ // when a new update is being applied. The newest update is first in the
+ // list and it is ordered by recency. Updates in the history have state
+ // Completed if the rollout completed - if an update was failing or halfway
+ // applied the state will be Partial. Only a limited amount of update history
+ // is preserved.
+ // +listType=atomic
+ // +optional
+ History []UpdateHistory `json:"history,omitempty"`
+
+ // observedGeneration reports which version of the spec is being synced.
+ // If this value is not equal to metadata.generation, then the desired
+ // and conditions fields may represent a previous version.
+ // +kubebuilder:validation:Required
+ // +required
+ ObservedGeneration int64 `json:"observedGeneration"`
+
+ // versionHash is a fingerprint of the content that the cluster will be
+ // updated with. It is used by the operator to avoid unnecessary work
+ // and is for internal use only.
+ // +kubebuilder:validation:Required
+ // +required
+ VersionHash string `json:"versionHash"`
+
+ // capabilities describes the state of optional, core cluster components.
+ Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"`
+
+ // conditions provides information about the cluster version. The condition
+ // "Available" is set to true if the desiredUpdate has been reached. The
+ // condition "Progressing" is set to true if an update is being applied.
+ // The condition "Degraded" is set to true if an update is currently blocked
+ // by a temporary or permanent error. Conditions are only valid for the
+ // current desiredUpdate when metadata.generation is equal to
+ // status.generation.
+ // +listType=map
+ // +listMapKey=type
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // availableUpdates contains updates recommended for this
+ // cluster. Updates which appear in conditionalUpdates but not in
+ // availableUpdates may expose this cluster to known issues. This list
+ // may be empty if no updates are recommended, if the update service
+ // is unavailable, or if an invalid channel has been specified.
+ // +nullable
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +required
+ AvailableUpdates []Release `json:"availableUpdates"`
+
+ // conditionalUpdates contains the list of updates that may be
+ // recommended for this cluster if it meets specific required
+ // conditions. Consumers interested in the set of updates that are
+ // actually recommended for this cluster should use
+ // availableUpdates. This list may be empty if no updates are
+ // recommended, if the update service is unavailable, or if an empty
+ // or invalid channel has been specified.
+ // +listType=atomic
+ // +optional
+ ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"`
+}
+
+// UpdateState is a constant representing whether an update was successfully
+// applied to the cluster or not.
+type UpdateState string
+
+const (
+ // CompletedUpdate indicates an update was successfully applied
+ // to the cluster (all resource updates were successful).
+ CompletedUpdate UpdateState = "Completed"
+ // PartialUpdate indicates an update was never completely applied
+ // or is currently being applied.
+ PartialUpdate UpdateState = "Partial"
+)
+
+// UpdateHistory is a single attempted update to the cluster.
+type UpdateHistory struct {
+ // state reflects whether the update was fully applied. The Partial state
+ // indicates the update is not fully applied, while the Completed state
+ // indicates the update was successfully rolled out at least once (all
+ // parts of the update successfully applied).
+ // +kubebuilder:validation:Required
+ // +required
+ State UpdateState `json:"state"`
+
+ // startedTime is the time at which the update was started.
+ // +kubebuilder:validation:Required
+ // +required
+ StartedTime metav1.Time `json:"startedTime"`
+
+ // completionTime, if set, is when the update was fully applied. The update
+ // that is currently being applied will have a null completion time.
+ // Completion time will always be set for entries that are not the current
+ // update (usually to the started time of the next update).
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ CompletionTime *metav1.Time `json:"completionTime"`
+
+ // version is a semantic version identifying the update version. If the
+ // requested image does not define a version, or if a failure occurs
+ // retrieving the image, this value may be empty.
+ //
+ // +optional
+ Version string `json:"version"`
+
+ // image is a container image location that contains the update. This value
+ // is always populated.
+ // +kubebuilder:validation:Required
+ // +required
+ Image string `json:"image"`
+
+ // verified indicates whether the provided update was properly verified
+ // before it was installed. If this is false the cluster may not be trusted.
+ // Verified does not cover upgradeable checks that depend on the cluster
+ // state at the time when the update target was accepted.
+ // +kubebuilder:validation:Required
+ // +required
+ Verified bool `json:"verified"`
+
+ // acceptedRisks records risks which were accepted to initiate the update.
+ // For example, it may mention an Upgradeable=False or missing signature
+ // that was overridden via desiredUpdate.force, or an update that was
+ // initiated despite not being in the availableUpdates set of recommended
+ // update targets.
+ // +optional
+ AcceptedRisks string `json:"acceptedRisks,omitempty"`
+}
+
+// ClusterID is string RFC4122 uuid.
+type ClusterID string
+
+// ClusterVersionArchitecture enumerates valid cluster architectures.
+// +kubebuilder:validation:Enum="Multi";""
+type ClusterVersionArchitecture string
+
+const (
+ // ClusterVersionArchitectureMulti identifies a multi architecture. A multi
+ // architecture cluster is capable of running nodes with multiple architectures.
+ ClusterVersionArchitectureMulti ClusterVersionArchitecture = "Multi"
+)
+
+// ClusterVersionCapability enumerates optional, core cluster components.
+// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager
+type ClusterVersionCapability string
+
+const (
+ // ClusterVersionCapabilityOpenShiftSamples manages the sample
+ // image streams and templates stored in the openshift
+ // namespace, and any registry credentials, stored as a secret,
+ // needed for the image streams to import the images they
+ // reference.
+ ClusterVersionCapabilityOpenShiftSamples ClusterVersionCapability = "openshift-samples"
+
+ // ClusterVersionCapabilityBaremetal manages the cluster
+ // baremetal operator which is responsible for running the metal3
+ // deployment.
+ ClusterVersionCapabilityBaremetal ClusterVersionCapability = "baremetal"
+
+ // ClusterVersionCapabilityMarketplace manages the Marketplace operator which
+ // supplies Operator Lifecycle Manager (OLM) users with default catalogs of
+ // "optional" operators.
+ //
+ // Note that Marketplace has a hard requirement on OLM. OLM can not be disabled
+ // while Marketplace is enabled.
+ ClusterVersionCapabilityMarketplace ClusterVersionCapability = "marketplace"
+
+ // ClusterVersionCapabilityConsole manages the Console operator which
+ // installs and maintains the web console.
+ ClusterVersionCapabilityConsole ClusterVersionCapability = "Console"
+
+ // ClusterVersionCapabilityInsights manages the Insights operator which
+ // collects anonymized information about the cluster to generate
+ // recommendations for possible cluster issues.
+ ClusterVersionCapabilityInsights ClusterVersionCapability = "Insights"
+
+ // ClusterVersionCapabilityStorage manages the storage operator which
+ // is responsible for providing cluster-wide storage defaults
+ // WARNING: Do not disable this capability when deployed to
+ // RHEV and OpenStack without reading the docs.
+ // These clusters heavily rely on this capability; disabling it
+ // may cause damage to the cluster.
+ ClusterVersionCapabilityStorage ClusterVersionCapability = "Storage"
+
+ // ClusterVersionCapabilityCSISnapshot manages the csi snapshot
+ // controller operator which is responsible for watching the
+ // VolumeSnapshot CRD objects and manages the creation and deletion
+ // lifecycle of volume snapshots
+ ClusterVersionCapabilityCSISnapshot ClusterVersionCapability = "CSISnapshot"
+
+ // ClusterVersionCapabilityNodeTuning manages the Node Tuning Operator
+ // which is responsible for watching the Tuned and Profile CRD
+ // objects and manages the containerized TuneD daemon which controls
+ // system level tuning of Nodes
+ ClusterVersionCapabilityNodeTuning ClusterVersionCapability = "NodeTuning"
+
+ // ClusterVersionCapabilityMachineAPI manages
+ // machine-api-operator
+ // cluster-autoscaler-operator
+ // cluster-control-plane-machine-set-operator
+ // which is responsible for machines configuration and heavily
+ // targeted for SNO clusters.
+ //
+ // The following CRDs are disabled as well
+ // machines
+ // machineset
+ // controlplanemachineset
+ //
+ // WARNING: Do not disable this capability without reading the
+ // documentation. It is an important part of the OpenShift system
+ // and disabling it may cause cluster damage.
+ ClusterVersionCapabilityMachineAPI ClusterVersionCapability = "MachineAPI"
+
+ // ClusterVersionCapabilityBuild manages the Build API which is responsible
+ // for watching the Build API objects and managing their lifecycle.
+ // The functionality is located under openshift-apiserver and openshift-controller-manager.
+ //
+ // The following resources are taken into account:
+ // - builds
+ // - buildconfigs
+ ClusterVersionCapabilityBuild ClusterVersionCapability = "Build"
+
+ // ClusterVersionCapabilityDeploymentConfig manages the DeploymentConfig API
+ // which is responsible for watching the DeploymentConfig API and managing their lifecycle.
+ // The functionality is located under openshift-apiserver and openshift-controller-manager.
+ //
+ // The following resources are taken into account:
+ // - deploymentconfigs
+ ClusterVersionCapabilityDeploymentConfig ClusterVersionCapability = "DeploymentConfig"
+
+ // ClusterVersionCapabilityImageRegistry manages the image registry which
+ // allows distributing Docker images
+ ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry"
+
+ // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager
+ // which itself manages the lifecycle of operators
+ ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager"
+
+ // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers
+ // in openshift cluster
+ ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential"
+
+ // ClusterVersionCapabilityIngress manages the cluster ingress operator
+ // which is responsible for running the ingress controllers (including OpenShift router).
+ //
+ // The following CRDs are part of the capability as well:
+ // IngressController
+ // DNSRecord
+ // GatewayClass
+ // Gateway
+ // HTTPRoute
+ // ReferenceGrant
+ //
+ // WARNING: This capability cannot be disabled on the standalone OpenShift.
+ ClusterVersionCapabilityIngress ClusterVersionCapability = "Ingress"
+
+ // ClusterVersionCapabilityCloudControllerManager manages various Cloud Controller
+ // Managers deployed on top of OpenShift. They help you work with the cloud
+ // provider API and embed cloud-specific control logic.
+ ClusterVersionCapabilityCloudControllerManager ClusterVersionCapability = "CloudControllerManager"
+)
+
+// KnownClusterVersionCapabilities includes all known optional, core cluster components.
+var KnownClusterVersionCapabilities = []ClusterVersionCapability{
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ ClusterVersionCapabilityBuild,
+ ClusterVersionCapabilityDeploymentConfig,
+ ClusterVersionCapabilityImageRegistry,
+ ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityCloudCredential,
+ ClusterVersionCapabilityIngress,
+ ClusterVersionCapabilityCloudControllerManager,
+}
+
+// ClusterVersionCapabilitySet defines sets of cluster version capabilities.
+// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;v4.15;v4.16;vCurrent
+type ClusterVersionCapabilitySet string
+
+const (
+ // ClusterVersionCapabilitySetNone is an empty set enabling
+ // no optional capabilities.
+ ClusterVersionCapabilitySetNone ClusterVersionCapabilitySet = "None"
+
+ // ClusterVersionCapabilitySet4_11 is the recommended set of
+ // optional capabilities to enable for the 4.11 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_11 ClusterVersionCapabilitySet = "v4.11"
+
+ // ClusterVersionCapabilitySet4_12 is the recommended set of
+ // optional capabilities to enable for the 4.12 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_12 ClusterVersionCapabilitySet = "v4.12"
+
+ // ClusterVersionCapabilitySet4_13 is the recommended set of
+ // optional capabilities to enable for the 4.13 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_13 ClusterVersionCapabilitySet = "v4.13"
+
+ // ClusterVersionCapabilitySet4_14 is the recommended set of
+ // optional capabilities to enable for the 4.14 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14"
+
+ // ClusterVersionCapabilitySet4_15 is the recommended set of
+ // optional capabilities to enable for the 4.15 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_15 ClusterVersionCapabilitySet = "v4.15"
+
+ // ClusterVersionCapabilitySet4_16 is the recommended set of
+ // optional capabilities to enable for the 4.16 version of
+ // OpenShift. This list will remain the same no matter which
+ // version of OpenShift is installed.
+ ClusterVersionCapabilitySet4_16 ClusterVersionCapabilitySet = "v4.16"
+
+ // ClusterVersionCapabilitySetCurrent is the recommended set
+ // of optional capabilities to enable for the cluster's
+ // current version of OpenShift.
+ ClusterVersionCapabilitySetCurrent ClusterVersionCapabilitySet = "vCurrent"
+)
+
+// ClusterVersionCapabilitySets defines sets of cluster version capabilities.
+var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVersionCapability{
+ ClusterVersionCapabilitySetNone: {},
+ ClusterVersionCapabilitySet4_11: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityMachineAPI,
+ },
+ ClusterVersionCapabilitySet4_12: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityMachineAPI,
+ },
+ ClusterVersionCapabilitySet4_13: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ },
+ ClusterVersionCapabilitySet4_14: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ ClusterVersionCapabilityBuild,
+ ClusterVersionCapabilityDeploymentConfig,
+ ClusterVersionCapabilityImageRegistry,
+ },
+ ClusterVersionCapabilitySet4_15: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ ClusterVersionCapabilityBuild,
+ ClusterVersionCapabilityDeploymentConfig,
+ ClusterVersionCapabilityImageRegistry,
+ ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityCloudCredential,
+ },
+ ClusterVersionCapabilitySet4_16: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ ClusterVersionCapabilityBuild,
+ ClusterVersionCapabilityDeploymentConfig,
+ ClusterVersionCapabilityImageRegistry,
+ ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityCloudCredential,
+ ClusterVersionCapabilityIngress,
+ ClusterVersionCapabilityCloudControllerManager,
+ },
+ ClusterVersionCapabilitySetCurrent: {
+ ClusterVersionCapabilityBaremetal,
+ ClusterVersionCapabilityConsole,
+ ClusterVersionCapabilityInsights,
+ ClusterVersionCapabilityMarketplace,
+ ClusterVersionCapabilityStorage,
+ ClusterVersionCapabilityOpenShiftSamples,
+ ClusterVersionCapabilityCSISnapshot,
+ ClusterVersionCapabilityNodeTuning,
+ ClusterVersionCapabilityMachineAPI,
+ ClusterVersionCapabilityBuild,
+ ClusterVersionCapabilityDeploymentConfig,
+ ClusterVersionCapabilityImageRegistry,
+ ClusterVersionCapabilityOperatorLifecycleManager,
+ ClusterVersionCapabilityCloudCredential,
+ ClusterVersionCapabilityIngress,
+ ClusterVersionCapabilityCloudControllerManager,
+ },
+}
+
+// ClusterVersionCapabilitiesSpec selects the managed set of
+// optional, core cluster components.
+// +k8s:deepcopy-gen=true
+type ClusterVersionCapabilitiesSpec struct {
+ // baselineCapabilitySet selects an initial set of
+ // optional capabilities to enable, which can be extended via
+ // additionalEnabledCapabilities. If unset, the cluster will
+ // choose a default, and the default may change over time.
+ // The current default is vCurrent.
+ // +optional
+ BaselineCapabilitySet ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"`
+
+ // additionalEnabledCapabilities extends the set of managed
+ // capabilities beyond the baseline defined in
+ // baselineCapabilitySet. The default is an empty set.
+ // +listType=atomic
+ // +optional
+ AdditionalEnabledCapabilities []ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesStatus describes the state of optional,
+// core cluster components.
+// +k8s:deepcopy-gen=true
+type ClusterVersionCapabilitiesStatus struct {
+ // enabledCapabilities lists all the capabilities that are currently managed.
+ // +listType=atomic
+ // +optional
+ EnabledCapabilities []ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
+
+ // knownCapabilities lists all the capabilities known to the current cluster.
+ // +listType=atomic
+ // +optional
+ KnownCapabilities []ClusterVersionCapability `json:"knownCapabilities,omitempty"`
+}
+
+// ComponentOverride allows overriding cluster version operator's behavior
+// for a component.
+// +k8s:deepcopy-gen=true
+type ComponentOverride struct {
+ // kind identifies which object to override.
+ // +kubebuilder:validation:Required
+ // +required
+ Kind string `json:"kind"`
+ // group identifies the API group that the kind is in.
+ // +kubebuilder:validation:Required
+ // +required
+ Group string `json:"group"`
+
+ // namespace is the component's namespace. If the resource is cluster
+ // scoped, the namespace should be empty.
+ // +kubebuilder:validation:Required
+ // +required
+ Namespace string `json:"namespace"`
+ // name is the component's name.
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+
+ // unmanaged controls if cluster version operator should stop managing the
+ // resources in this cluster.
+ // Default: false
+ // +kubebuilder:validation:Required
+ // +required
+ Unmanaged bool `json:"unmanaged"`
+}
+
+// URL is a thin wrapper around string that ensures the string is a valid URL.
+type URL string
+
+// Update represents an administrator update request.
+// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image"
+// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set"
+// +k8s:deepcopy-gen=true
+type Update struct {
+ // architecture is an optional field that indicates the desired
+ // value of the cluster architecture. In this context cluster
+ // architecture means either a single architecture or a multi
+ // architecture. architecture can only be set to Multi thereby
+ // only allowing updates from single to multi architecture. If
+ // architecture is set, image cannot be set and version must be
+ // set.
+ // Valid values are 'Multi' and empty.
+ //
+ // +optional
+ Architecture ClusterVersionArchitecture `json:"architecture"`
+
+ // version is a semantic version identifying the update version.
+ // version is ignored if image is specified and required if
+ // architecture is specified.
+ //
+ // +optional
+ Version string `json:"version"`
+
+ // image is a container image location that contains the update.
+ // image should be used when the desired version does not exist in availableUpdates or history.
+ // When image is set, version is ignored. When image is set, version should be empty.
+ // When image is set, architecture cannot be specified.
+ //
+ // +optional
+ Image string `json:"image"`
+
+ // force allows an administrator to update to an image that has failed
+ // verification or upgradeable checks. This option should only
+ // be used when the authenticity of the provided image has been verified out
+ // of band because the provided image will run with full administrative access
+ // to the cluster. Do not use this flag with images that come from unknown
+ // or potentially malicious sources.
+ //
+ // +optional
+ Force bool `json:"force"`
+}
+
+// Release represents an OpenShift release image and associated metadata.
+// +k8s:deepcopy-gen=true
+type Release struct {
+ // version is a semantic version identifying the update version. When this
+ // field is part of spec, version is optional if image is specified.
+ // +required
+ Version string `json:"version"`
+
+ // image is a container image location that contains the update. When this
+ // field is part of spec, image is optional if version is specified and the
+ // availableUpdates field contains a matching version.
+ // +required
+ Image string `json:"image"`
+
+ // url contains information about this release. This URL is set by
+ // the 'url' metadata property on a release or the metadata returned by
+ // the update API and should be displayed as a link in user
+ // interfaces. The URL field may not be set for test or nightly
+ // releases.
+ // +optional
+ URL URL `json:"url,omitempty"`
+
+ // channels is the set of Cincinnati channels to which the release
+ // currently belongs.
+ // +listType=set
+ // +optional
+ Channels []string `json:"channels,omitempty"`
+}
+
+// RetrievedUpdates reports whether available updates have been retrieved from
+// the upstream update server. The condition is Unknown before retrieval, False
+// if the updates could not be retrieved or recently failed, or True if the
+// availableUpdates field is accurate and recent.
+const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
+
+// ConditionalUpdate represents an update which is recommended to some
+// clusters on the version the current cluster is reconciling, but which
+// may not be recommended for the current cluster.
+type ConditionalUpdate struct {
+ // release is the target of the update.
+ // +kubebuilder:validation:Required
+ // +required
+ Release Release `json:"release"`
+
+ // risks represents the range of issues associated with
+ // updating to the target release. The cluster-version
+ // operator will evaluate all entries, and only recommend the
+ // update if there is at least one entry and all entries
+ // recommend the update.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +required
+ Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"`
+
+ // conditions represents the observations of the conditional update's
+ // current status. Known types are:
+ // * Recommended, for whether the update is recommended for the current cluster.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// ConditionalUpdateRisk represents a reason and cluster-state
+// for not recommending a conditional update.
+// +k8s:deepcopy-gen=true
+type ConditionalUpdateRisk struct {
+ // url contains information about this risk.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Format=uri
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ URL string `json:"url"`
+
+ // name is the CamelCase reason for not recommending a
+ // conditional update, in the event that matchingRules match the
+ // cluster state.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ Name string `json:"name"`
+
+ // message provides additional information about the risk of
+ // updating, in the event that matchingRules match the cluster
+ // state. This is only to be consumed by humans. It may
+ // contain Line Feed characters (U+000A), which should be
+ // rendered as new lines.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +required
+ Message string `json:"message"`
+
+ // matchingRules is a slice of conditions for deciding which
+ // clusters match the risk and which do not. The slice is
+ // ordered by decreasing precedence. The cluster-version
+ // operator will walk the slice in order, and stop after the
+ // first it can successfully evaluate. If no condition can be
+ // successfully evaluated, the update will not be recommended.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +listType=atomic
+ // +required
+ MatchingRules []ClusterCondition `json:"matchingRules"`
+}
+
+// ClusterCondition is a union of typed cluster conditions. The 'type'
+// property determines which of the type-specific properties are relevant.
+// When evaluated on a cluster, the condition may match, not match, or
+// fail to evaluate.
+// +k8s:deepcopy-gen=true
+type ClusterCondition struct {
+ // type represents the cluster-condition type. This defines
+ // the members and semantics of any additional properties.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum={"Always","PromQL"}
+ // +required
+ Type string `json:"type"`
+
+ // promQL represents a cluster condition based on PromQL.
+ // +optional
+ PromQL *PromQLClusterCondition `json:"promql,omitempty"`
+}
+
+// PromQLClusterCondition represents a cluster condition based on PromQL.
+type PromQLClusterCondition struct {
+ // PromQL is a PromQL query classifying clusters. This query
+ // should return a 1 in the match case and a 0 in the
+ // does-not-match case. Queries which return no time
+ // series, or which return values besides 0 or 1, are
+ // evaluation failures.
+ // +kubebuilder:validation:Required
+ // +required
+ PromQL string `json:"promql"`
+}
+
+// ClusterVersionList is a list of ClusterVersion resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type ClusterVersionList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterVersion `json:"items"`
+}
+
+// SignatureStore represents the URL of custom Signature Store
+type SignatureStore struct {
+
+ // url contains the upstream custom signature store URL.
+ // url should be a valid absolute http/https URI of an upstream signature store as per rfc1738.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
+ // +kubebuilder:validation:Required
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the signature store is not honored.
+ // If the specified ca data is not valid, the signature store is not honored.
+ // If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
new file mode 100644
index 0000000000..36b1696af9
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -0,0 +1,80 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Console holds cluster-wide configuration for the web console, including the
+// logout URL, and reports the public URL of the console. The canonical name is
+// `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=consoles,scope=Cluster
+// +kubebuilder:subresource:status
+type Console struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ConsoleSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ConsoleStatus `json:"status"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+ // +optional
+ Authentication ConsoleAuthentication `json:"authentication"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
+type ConsoleStatus struct {
+ // The URL for the console. This will be derived from the host for the route that
+ // is created for the console.
+ ConsoleURL string `json:"consoleURL"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConsoleList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Console `json:"items"`
+}
+
+// ConsoleAuthentication defines a list of optional configuration for console authentication.
+type ConsoleAuthentication struct {
+ // An optional, absolute URL to redirect web browsers to after logging out of
+ // the console. If not specified, it will redirect to the default login page.
+ // This is required when using an identity provider that supports single
+ // sign-on (SSO) such as:
+ // - OpenID (Keycloak, Azure)
+ // - RequestHeader (GSSAPI, SSPI, SAML)
+ // - OAuth (GitHub, GitLab, Google)
+ // Logging out of the console will destroy the user's token. The logoutRedirect
+ // provides the user the option to perform single logout (SLO) through the identity
+ // provider to destroy their single sign-on session.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$`
+ LogoutRedirect string `json:"logoutRedirect,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go
new file mode 100644
index 0000000000..1875c9cddf
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_dns.go
@@ -0,0 +1,140 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DNS holds cluster-wide information about DNS. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=dnses,scope=Cluster
+// +kubebuilder:subresource:status
+type DNS struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec DNSSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status DNSStatus `json:"status"`
+}
+
+type DNSSpec struct {
+ // baseDomain is the base domain of the cluster. All managed DNS records will
+ // be sub-domains of this base.
+ //
+ // For example, given the base domain `openshift.example.com`, an API server
+ // DNS record may be created for `cluster-api.openshift.example.com`.
+ //
+ // Once set, this field cannot be changed.
+ BaseDomain string `json:"baseDomain"`
+ // publicZone is the location where all the DNS records that are publicly accessible to
+ // the internet exist.
+ //
+ // If this field is nil, no public records should be created.
+ //
+ // Once set, this field cannot be changed.
+ //
+ // +optional
+ PublicZone *DNSZone `json:"publicZone,omitempty"`
+ // privateZone is the location where all the DNS records that are only available internally
+ // to the cluster exist.
+ //
+ // If this field is nil, no private records should be created.
+ //
+ // Once set, this field cannot be changed.
+ //
+ // +optional
+ PrivateZone *DNSZone `json:"privateZone,omitempty"`
+ // platform holds configuration specific to the underlying
+ // infrastructure provider for DNS.
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // +optional
+ Platform DNSPlatformSpec `json:"platform,omitempty"`
+}
+
+// DNSZone is used to define a DNS hosted zone.
+// A zone can be identified by an ID or tags.
+type DNSZone struct {
+ // id is the identifier that can be used to find the DNS hosted zone.
+ //
+ // on AWS zone can be fetched using `ID` as id in [1]
+ // on Azure zone can be fetched using `ID` as a pre-determined name in [2],
+ // on GCP zone can be fetched using `ID` as a pre-determined name in [3].
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // tags can be used to query the DNS hosted zone.
+ //
+ // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+type DNSStatus struct {
+ // dnsSuffix (service-ca amongst others)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []DNS `json:"items"`
+}
+
+// DNSPlatformSpec holds cloud-provider-specific configuration
+// for DNS administration.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise"
+type DNSPlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster.
+ // Allowed values: "", "AWS".
+ //
+ // Individual components may not support all platforms,
+ // and must handle unrecognized platforms with best-effort defaults.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'"
+ Type PlatformType `json:"type"`
+
+ // aws contains DNS configuration specific to the Amazon Web Services cloud provider.
+ // +optional
+ AWS *AWSDNSSpec `json:"aws"`
+}
+
+// AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.
+type AWSDNSSpec struct {
+ // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing
+ // operations on the cluster's private hosted zone specified in the cluster DNS config.
+ // When left empty, no role should be assumed.
+ // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$`
+ // +optional
+ PrivateZoneIAMRole string `json:"privateZoneIAMRole"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
new file mode 100644
index 0000000000..1e03171961
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -0,0 +1,150 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Feature holds cluster-wide information about feature gates. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=featuregates,scope=Cluster
+// +kubebuilder:subresource:status
+type FeatureGate struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec FeatureGateSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status FeatureGateStatus `json:"status"`
+}
+
+type FeatureSet string
+
+var (
+ // Default feature set that allows upgrades.
+ Default FeatureSet = ""
+
+ // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
+ // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+ TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
+
+ // DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning
+ // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+ DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade"
+
+ // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way.
+ CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+
+ // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead
+ AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade}
+)
+
+type FeatureGateSpec struct {
+ FeatureGateSelection `json:",inline"`
+}
+
+// +union
+type FeatureGateSelection struct {
+ // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
+ // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
+ // +unionDiscriminator
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed"
+ FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+ // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
+ // +optional
+ // +nullable
+ CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
+}
+
+type CustomFeatureGates struct {
+ // enabled is a list of all feature gates that you want to force on
+ // +optional
+ Enabled []FeatureGateName `json:"enabled,omitempty"`
+ // disabled is a list of all feature gates that you want to force off
+ // +optional
+ Disabled []FeatureGateName `json:"disabled,omitempty"`
+}
+
+// FeatureGateName is a string to enforce patterns on the name of a FeatureGate
+// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$`
+type FeatureGateName string
+
+type FeatureGateStatus struct {
+ // conditions represent the observations of the current state.
+ // Known .status.conditions.type are: "DeterminationDegraded"
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion.
+ // Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate
+ // the version they are managing, find the enabled/disabled featuregates and make the operand and operator match.
+ // The enabled/disabled values for a particular version may change during the life of the cluster as various
+ // .spec.featureSet values are selected.
+ // Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable
+ // lists is beyond the scope of this API and is the responsibility of individual operators.
+ // Only featureGates with .version in the ClusterVersion.status will be present in this list.
+ // +listType=map
+ // +listMapKey=version
+ FeatureGates []FeatureGateDetails `json:"featureGates"`
+}
+
+type FeatureGateDetails struct {
+ // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.
+ // +kubebuilder:validation:Required
+ // +required
+ Version string `json:"version"`
+ // enabled is a list of all feature gates that are enabled in the cluster for the named version.
+ // +optional
+ Enabled []FeatureGateAttributes `json:"enabled"`
+ // disabled is a list of all feature gates that are disabled in the cluster for the named version.
+ // +optional
+ Disabled []FeatureGateAttributes `json:"disabled"`
+}
+
+type FeatureGateAttributes struct {
+ // name is the name of the FeatureGate.
+ // +kubebuilder:validation:Required
+ Name FeatureGateName `json:"name"`
+
+ // possible (probable?) future additions include
+ // 1. support level (Stable, ServiceDeliveryOnly, TechPreview, DevPreview)
+ // 2. description
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type FeatureGateList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []FeatureGate `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go
new file mode 100644
index 0000000000..74511f8640
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image.go
@@ -0,0 +1,137 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Image governs policies related to imagestream imports and runtime configuration
+// for external registries. It allows cluster admins to configure which registries
+// OpenShift is allowed to import images from, extra CA trust bundles for external
+// registries, and policies to block or allow registry hostnames.
+// When exposing OpenShift's image registry to the public, this also lets cluster
+// admins specify the external hostname.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=images,scope=Cluster
+// +kubebuilder:subresource:status
+type Image struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ImageStatus `json:"status"`
+}
+
+type ImageSpec struct {
+ // allowedRegistriesForImport limits the container image registries that normal users may import
+ // images from. Set this list to the registries that you trust to contain valid Docker
+ // images and that you want applications to be able to import from. Users with
+ // permission to create Images or ImageStreamMappings via the API are not affected by
+ // this policy - typically only administrators or system integrations will have those
+ // permissions.
+ // +optional
+ AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"`
+
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted during imagestream import, pod image pull, build image pull, and
+ // imageregistry pullthrough.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+
+ // registrySources contains configuration that determines how the container runtime
+ // should treat individual registries when accessing images for builds+pods. (e.g.
+ // whether or not to allow insecure access). It does not contain configuration for the
+ // internal cluster registry.
+ // +optional
+ RegistrySources RegistrySources `json:"registrySources"`
+}
+
+type ImageStatus struct {
+ // internalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format.
+ // This value is set by the image registry operator which controls the internal registry
+ // hostname.
+ // +optional
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
+
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Image `json:"items"`
+}
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // domainName specifies a domain name for the registry
+ // In case the registry uses a non-standard (other than 80 or 443) port, the port should be included
+ // in the domain name as well.
+ DomainName string `json:"domainName"`
+ // insecure indicates whether the registry is secure (https) or insecure (http)
+ // By default (if not specified) the registry is assumed as secure.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+// RegistrySources holds cluster-wide information about how to handle the registries config.
+type RegistrySources struct {
+ // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.
+ // +optional
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified
+ // domains in their pull specs. Registries will be searched in the order provided in the list.
+ // Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.
+ // +optional
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:Format=hostname
+ // +listType=set
+ ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
new file mode 100644
index 0000000000..f2faf1996d
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go
@@ -0,0 +1,100 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/874
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagecontentpolicies,scope=Cluster
+// +kubebuilder:subresource:status
+type ImageContentPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageContentPolicySpec `json:"spec"`
+}
+
+// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.
+type ImageContentPolicySpec struct {
+ // repositoryDigestMirrors allows images referenced by image digests in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To pull image from mirrors by tags, should set the "allowMirrorByTags".
+ //
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // +optional
+ // +listType=map
+ // +listMapKey=source
+ RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentPolicyList lists the items in the ImageContentPolicy CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageContentPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageContentPolicy `json:"items"`
+}
+
+// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type RepositoryDigestMirrors struct {
+ // source is the repository that users refer to, e.g. in image pull specifications.
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$`
+ Source string `json:"source"`
+ // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests.
+ // Pulling images by tag can potentially yield different images, depending on which endpoint
+ // we pull from. Forcing digest-pulls for mirrors avoids that issue.
+ // +optional
+ AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"`
+ // mirrors is zero or more repositories that may also contain the same images.
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec. No mirror will be configured.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors. Other cluster configuration,
+ // including (but not limited to) other repositoryDigestMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // +optional
+ // +listType=set
+ Mirrors []Mirror `json:"mirrors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$`
+type Mirror string
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
new file mode 100644
index 0000000000..8fa38f223b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go
@@ -0,0 +1,142 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagedigestmirrorsets,scope=Cluster,shortName=idms
+// +kubebuilder:subresource:status
+type ImageDigestMirrorSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageDigestMirrorSetSpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ImageDigestMirrorSetStatus `json:"status,omitempty"`
+}
+
+// ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.
+type ImageDigestMirrorSetSpec struct {
+ // imageDigestMirrors allows images referenced by image digests in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in imageDigestMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To use mirrors to pull images using tag specification, users should configure
+ // a list of mirrors using "ImageTagMirrorSet" CRD.
+ //
+ // If the image pull specification matches the repository of "source" in multiple imagedigestmirrorset objects,
+ // only the objects which define the most specific namespace match will be used.
+ // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as
+ // the "source", only the objects using quay.io/libpod/busybox are going to apply
+ // for pull specification quay.io/libpod/busybox.
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order.
+ // +optional
+ // +listType=atomic
+ ImageDigestMirrors []ImageDigestMirrors `json:"imageDigestMirrors"`
+}
+
+type ImageDigestMirrorSetStatus struct{}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageDigestMirrorSetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageDigestMirrorSet `json:"items"`
+}
+
+// +kubebuilder:validation:Pattern=`^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+type ImageMirror string
+
+// MirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors.
+// +kubebuilder:validation:Enum=NeverContactSource;AllowContactingSource
+type MirrorSourcePolicy string
+
+const (
+ // NeverContactSource prevents image pull from the specified repository in the pull spec if the image pull from the mirror list fails.
+ NeverContactSource MirrorSourcePolicy = "NeverContactSource"
+
+ // AllowContactingSource allows falling back to the specified repository in the pull spec if the image pull from the mirror list fails.
+ AllowContactingSource MirrorSourcePolicy = "AllowContactingSource"
+)
+
+// ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageDigestMirrors struct {
+ // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname
+ // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // [*.]host
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+ Source string `json:"source"`
+ // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
+ // Images can be pulled from these mirrors only if they are referenced by their digests.
+ // The mirrored location is obtained by replacing the part of the input reference that
+ // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference,
+ // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo
+ // repository to be used.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
+ // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be
+ // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy"
+ // Other cluster configuration, including (but not limited to) other imageDigestMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // "mirrors" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +optional
+ // +listType=set
+ Mirrors []ImageMirror `json:"mirrors,omitempty"`
+ // mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors.
+ // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list.
+ // +optional
+ MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
new file mode 100644
index 0000000000..d9627b78cc
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go
@@ -0,0 +1,129 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1126
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagetagmirrorsets,scope=Cluster,shortName=itms
+// +kubebuilder:subresource:status
+type ImageTagMirrorSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageTagMirrorSetSpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ImageTagMirrorSetStatus `json:"status,omitempty"`
+}
+
+// ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.
+type ImageTagMirrorSetSpec struct {
+ // imageTagMirrors allows images referenced by image tags in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in imageTagMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // To use mirrors to pull images using digest specification only, users should configure
+ // a list of mirrors using "ImageDigestMirrorSet" CRD.
+ //
+ // If the image pull specification matches the repository of "source" in multiple imagetagmirrorset objects,
+ // only the objects which define the most specific namespace match will be used.
+ // For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as
+ // the "source", only the objects using quay.io/libpod/busybox are going to apply
+ // for pull specification quay.io/libpod/busybox.
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // If the "mirrors" is not specified, the image will continue to be pulled from the specified
+ // repository in the pull spec.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // Users who want to use a deterministic order of mirrors, should configure them into one list of mirrors using the expected order.
+ // +optional
+ // +listType=atomic
+ ImageTagMirrors []ImageTagMirrors `json:"imageTagMirrors"`
+}
+
+type ImageTagMirrorSetStatus struct{}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagMirrorSetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageTagMirrorSet `json:"items"`
+}
+
+// ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+type ImageTagMirrors struct {
+ // source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname
+ // e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of the corresponding registry.
+ // "source" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // [*.]host
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`
+ Source string `json:"source"`
+ // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified.
+ // Images can be pulled from these mirrors only if they are referenced by their tags.
+ // The mirrored location is obtained by replacing the part of the input reference that
+ // matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference,
+ // a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo
+ // repository to be used.
+ // Pulling images by tag can potentially yield different images, depending on which endpoint we pull from.
+ // Configuring a list of mirrors using "ImageDigestMirrorSet" CRD and forcing digest-pulls for mirrors avoids that issue.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors.
+ // If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be
+ // pulled from the repository in the pull spec unless explicitly prohibited by "mirrorSourcePolicy".
+ // Other cluster configuration, including (but not limited to) other imageTagMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // "mirrors" uses one of the following formats:
+ // host[:port]
+ // host[:port]/namespace[/namespace…]
+ // host[:port]/namespace[/namespace…]/repo
+ // for more information about the format, see the document about the location field:
+ // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table
+ // +optional
+ // +listType=set
+ Mirrors []ImageMirror `json:"mirrors,omitempty"`
+ // mirrorSourcePolicy defines the fallback policy if pulling the image from the mirrors fails.
+ // If unset, the image will continue to be pulled from the repository in the pull spec.
+ // sourcePolicy is valid configuration only when one or more mirrors are in the mirror list.
+ // +optional
+ MirrorSourcePolicy MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
new file mode 100644
index 0000000000..b0cba8ddf8
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -0,0 +1,1886 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+
+// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=infrastructures,scope=Cluster
+// +kubebuilder:subresource:status
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec InfrastructureSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InfrastructureStatus `json:"status"`
+}
+
+// InfrastructureSpec contains settings that apply to the cluster infrastructure.
+type InfrastructureSpec struct {
+ // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
+ // This configuration file is used to configure the Kubernetes cloud provider integration
+ // when using the built-in cloud provider integration or the external cloud controller manager.
+ // The namespace for this config map is openshift-config.
+ //
+ // cloudConfig should only be consumed by the kube_cloud_config controller.
+ // The controller is responsible for using the user configuration in the spec
+ // for various platforms and combining that with the user provided ConfigMap in this field
+ // to create a stitched kube cloud config.
+ // The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace
+ // with the kube cloud config is stored in `cloud.conf` key.
+ // All the clients are expected to use the generated ConfigMap only.
+ //
+ // +optional
+ CloudConfig ConfigMapFileReference `json:"cloudConfig"`
+
+ // platformSpec holds desired information specific to the underlying
+ // infrastructure provider.
+ PlatformSpec PlatformSpec `json:"platformSpec,omitempty"`
+}
+
+// InfrastructureStatus describes the infrastructure the cluster is leveraging.
+type InfrastructureStatus struct {
+ // infrastructureName uniquely identifies a cluster with a human friendly name.
+ // Once set it should not be changed. Must be of max length 27 and must have only
+ // alphanumeric or hyphen characters.
+ InfrastructureName string `json:"infrastructureName"`
+
+ // platform is the underlying infrastructure provider for the cluster.
+ //
+ // Deprecated: Use platformStatus.type instead.
+ Platform PlatformType `json:"platform,omitempty"`
+
+ // platformStatus holds status information specific to the underlying
+ // infrastructure provider.
+ // +optional
+ PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
+
+ // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
+ // etcd servers and clients.
+ // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
+ // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.
+ EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`
+
+ // apiServerURL is a valid URI with scheme 'https', address and
+ // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console
+ // to tell users where to find the Kubernetes API.
+ APIServerURL string `json:"apiServerURL"`
+
+ // apiServerInternalURL is a valid URI with scheme 'https',
+ // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components
+ // like kubelets, to contact the Kubernetes API server using the
+ // infrastructure provider rather than Kubernetes networking.
+ APIServerInternalURL string `json:"apiServerInternalURI"`
+
+ // controlPlaneTopology expresses the expectations for operands that normally run on control nodes.
+ // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster.
+ // The 'SingleReplica' mode will be used in single-node deployments
+ // and the operators should not configure the operand for highly-available operation
+ // The 'External' mode indicates that the control plane is hosted externally to the cluster and that
+ // its components are not visible within the cluster.
+ // +kubebuilder:default=HighlyAvailable
+ // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External
+ ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"`
+
+ // infrastructureTopology expresses the expectations for infrastructure services that do not run on control
+ // plane nodes, usually indicated by a node selector for a `role` value
+ // other than `master`.
+ // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster.
+ // The 'SingleReplica' mode will be used in single-node deployments
+ // and the operators should not configure the operand for highly-available operation
+ // NOTE: External topology mode is not applicable for this field.
+ // +kubebuilder:default=HighlyAvailable
+ // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica
+ InfrastructureTopology TopologyMode `json:"infrastructureTopology"`
+
+ // cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster.
+ // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets.
+ // Valid values are "None" and "AllNodes". When omitted, the default value is "None".
+ // The default value of "None" indicates that no nodes will be setup with CPU partitioning.
+ // The "AllNodes" value indicates that all nodes have been setup with CPU partitioning,
+ // and can then be further configured via the PerformanceProfile API.
+ // +kubebuilder:default=None
+ // +default="None"
+ // +kubebuilder:validation:Enum=None;AllNodes
+ // +optional
+ CPUPartitioning CPUPartitioningMode `json:"cpuPartitioning,omitempty"`
+}
+
+// TopologyMode defines the topology mode of the control/infra nodes.
+// NOTE: Enum validation is specified in each field that uses this type,
+// given that External value is not applicable to the InfrastructureTopology
+// field.
+type TopologyMode string
+
+const (
+ // "HighlyAvailable" is for operators to configure high-availability as much as possible.
+ HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable"
+
+ // "SingleReplica" is for operators to avoid spending resources for high-availability purpose.
+ SingleReplicaTopologyMode TopologyMode = "SingleReplica"
+
+ // "External" indicates that the component is running externally to the cluster. When specified
+ // as the control plane topology, operators should avoid scheduling workloads to masters or assume
+ // that any of the control plane components such as kubernetes API server or etcd are visible within
+ // the cluster.
+ ExternalTopologyMode TopologyMode = "External"
+)
+
+// CPUPartitioningMode defines the mode for CPU partitioning
+type CPUPartitioningMode string
+
+const (
+ // CPUPartitioningNone means that no CPU Partitioning is on in this cluster infrastructure
+ CPUPartitioningNone CPUPartitioningMode = "None"
+
+ // CPUPartitioningAllNodes means that all nodes are configured with CPU Partitioning in this cluster
+ CPUPartitioningAllNodes CPUPartitioningMode = "AllNodes"
+)
+
+// PlatformLoadBalancerType defines the type of load balancer used by the cluster.
+type PlatformLoadBalancerType string
+
+const (
+ // LoadBalancerTypeUserManaged is a load balancer with control-plane VIPs managed outside of the cluster by the customer.
+ LoadBalancerTypeUserManaged PlatformLoadBalancerType = "UserManaged"
+
+ // LoadBalancerTypeOpenShiftManagedDefault is the default load balancer with control-plane VIPs managed by the OpenShift cluster.
+ LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault"
+)
+
+// PlatformType is a specific supported infrastructure provider.
+// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External
+type PlatformType string
+
+const (
+ // AWSPlatformType represents Amazon Web Services infrastructure.
+ AWSPlatformType PlatformType = "AWS"
+
+ // AzurePlatformType represents Microsoft Azure infrastructure.
+ AzurePlatformType PlatformType = "Azure"
+
+ // BareMetalPlatformType represents managed bare metal infrastructure.
+ BareMetalPlatformType PlatformType = "BareMetal"
+
+ // GCPPlatformType represents Google Cloud Platform infrastructure.
+ GCPPlatformType PlatformType = "GCP"
+
+ // LibvirtPlatformType represents libvirt infrastructure.
+ LibvirtPlatformType PlatformType = "Libvirt"
+
+ // OpenStackPlatformType represents OpenStack infrastructure.
+ OpenStackPlatformType PlatformType = "OpenStack"
+
+ // NonePlatformType means there is no infrastructure provider.
+ NonePlatformType PlatformType = "None"
+
+ // VSpherePlatformType represents VMWare vSphere infrastructure.
+ VSpherePlatformType PlatformType = "VSphere"
+
+ // OvirtPlatformType represents oVirt/RHV infrastructure.
+ OvirtPlatformType PlatformType = "oVirt"
+
+ // IBMCloudPlatformType represents IBM Cloud infrastructure.
+ IBMCloudPlatformType PlatformType = "IBMCloud"
+
+ // KubevirtPlatformType represents KubeVirt/Openshift Virtualization infrastructure.
+ KubevirtPlatformType PlatformType = "KubeVirt"
+
+ // EquinixMetalPlatformType represents Equinix Metal infrastructure.
+ EquinixMetalPlatformType PlatformType = "EquinixMetal"
+
+ // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure.
+ PowerVSPlatformType PlatformType = "PowerVS"
+
+ // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure.
+ AlibabaCloudPlatformType PlatformType = "AlibabaCloud"
+
+ // NutanixPlatformType represents Nutanix infrastructure.
+ NutanixPlatformType PlatformType = "Nutanix"
+
+ // ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.
+ ExternalPlatformType PlatformType = "External"
+)
+
+// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type
+type IBMCloudProviderType string
+
+const (
+ // Classic means that the IBM Cloud cluster is using classic infrastructure
+ IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic"
+
+ // VPC means that the IBM Cloud cluster is using VPC infrastructure
+ IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC"
+
+ // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user provided infrastructure.
+ // This is utilized in IBM Cloud Satellite environments.
+ IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI"
+)
+
+// DNSType indicates whether the cluster DNS is hosted by the cluster or Core DNS .
+type DNSType string
+
+const (
+ // ClusterHosted indicates that a DNS solution other than the default provided by the
+ // cloud platform is in use. In this mode, the cluster hosts a DNS solution during installation and the
+ // user is expected to provide their own DNS solution post-install.
+ // When the DNS solution is `ClusterHosted`, the cluster will continue to use the
+ // default Load Balancers provided by the cloud platform.
+ ClusterHostedDNSType DNSType = "ClusterHosted"
+
+ // PlatformDefault indicates that the cluster is using the default DNS solution for the
+ // cloud platform. OpenShift is responsible for all the LB and DNS configuration needed for the
+ // cluster to be functional with no intervention from the user. To accomplish this, OpenShift
+ // configures the default LB and DNS solutions provided by the underlying cloud.
+ PlatformDefaultDNSType DNSType = "PlatformDefault"
+)
+
+// ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.
+type ExternalPlatformSpec struct {
+ // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time.
+ // This field is solely for informational and reporting purposes and is not expected to be used for decision-making.
+ // +kubebuilder:default:="Unknown"
+ // +default="Unknown"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == 'Unknown' || self == oldSelf",message="platform name cannot be changed once set"
+ // +optional
+ PlatformName string `json:"platformName,omitempty"`
+}
+
+// PlatformSpec holds the desired state specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at spec-level for the underlying cluster, it
+// is supposed that only one of the spec structs is set.
+type PlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS",
+ // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms,
+ // and must handle unrecognized platforms as None if they do not support that platform.
+ //
+ // +unionDiscriminator
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformSpec `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"`
+
+ // VSphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"`
+
+ // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+
+ // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
+
+ // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"`
+
+ // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
+
+ // Nutanix contains settings specific to the Nutanix infrastructure provider.
+ // +optional
+ Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"`
+
+ // ExternalPlatformType represents generic infrastructure provider.
+ // Platform-specific components should be supplemented separately.
+ // +optional
+ External *ExternalPlatformSpec `json:"external,omitempty"`
+}
+
+// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not
+type CloudControllerManagerState string
+
+const (
+ // Cloud Controller Manager is enabled and expected to be installed.
+ // This value indicates that new nodes should be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ CloudControllerManagerExternal CloudControllerManagerState = "External"
+
+ // Cloud Controller Manager is disabled and not expected to be installed.
+ // This value indicates that new nodes should not be tainted
+ // and no extra node initialization is expected from the cloud controller manager.
+ CloudControllerManagerNone CloudControllerManagerState = "None"
+)
+
+// CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings
+// +kubebuilder:validation:XValidation:rule="(has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != \"External\")",message="state may not be added or removed once set"
+type CloudControllerManagerStatus struct {
+ // state determines whether or not an external Cloud Controller Manager is expected to
+ // be installed within the cluster.
+ // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
+ //
+ // Valid values are "External", "None" and omitted.
+ // When set to "External", new nodes will be tainted as uninitialized when created,
+ // preventing them from running workloads until they are initialized by the cloud controller manager.
+ // When omitted or set to "None", new nodes will be not tainted
+ // and no extra initialization from the cloud controller manager is expected.
+ // +kubebuilder:validation:Enum="";External;None
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="state is immutable once set"
+ // +optional
+ State CloudControllerManagerState `json:"state"`
+}
+
+// ExternalPlatformStatus holds the current status of the generic External infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager)",message="cloudControllerManager may not be added or removed once set"
+type ExternalPlatformStatus struct {
+ // cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI).
+ // When omitted, new nodes will be not tainted
+ // and no extra initialization from the cloud controller manager is expected.
+ // +optional
+ CloudControllerManager CloudControllerManagerStatus `json:"cloudControllerManager"`
+}
+
+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at status-level for the underlying cluster, it
+// is supposed that only one of the status structs is set.
+type PlatformStatus struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None".
+ // Individual components may not support all platforms, and must handle
+ // unrecognized platforms as None if they do not support that platform.
+ //
+ // This value will be synced with to the `status.platform` and `status.platformStatus.type`.
+ // Currently this value cannot be changed once set.
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformStatus `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformStatus `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformStatus `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
+
+ // VSphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"`
+
+ // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"`
+
+ // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"`
+
+ // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"`
+
+ // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.
+ // +optional
+ PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"`
+
+ // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.
+ // +optional
+ AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"`
+
+ // Nutanix contains settings specific to the Nutanix infrastructure provider.
+ // +optional
+ Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"`
+
+ // External contains settings specific to the generic External infrastructure provider.
+ // +optional
+ External *ExternalPlatformStatus `json:"external,omitempty"`
+}
+
+// AWSServiceEndpoint store the configuration of a custom url to
+// override existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+ // name is the name of the AWS service.
+ // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ Name string `json:"name"`
+
+ // url is fully qualified URI with scheme https, that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AWSPlatformSpec struct {
+ // serviceEndpoints list contains custom endpoints which will override default
+ // service endpoint of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +listType=atomic
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+ // region holds the default AWS region for new AWS resources created by the cluster.
+ Region string `json:"region"`
+
+ // serviceEndpoints list contains custom endpoints which will override default
+ // service endpoint of AWS Services.
+ // There must be only one ServiceEndpoint for a service.
+ // +listType=atomic
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
+ // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources.
+ // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags
+ // available for the user (hence MaxItems=25 below).
+ // +kubebuilder:validation:MaxItems=25
+ // +listType=atomic
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+ // key is the key of the tag
+ // NOTE(review): within the character class below, `+-@` forms a range
+ // (0x2B through 0x40), so characters such as `;`, `<`, `>` and `?` are also
+ // admitted — broader than the literal set the doc implies; confirm upstream
+ // intent before tightening.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Key string `json:"key"`
+ // value is the value of the tag.
+ // Some AWS service do not support empty values. Since tags are added to resources in many services, the
+ // length of the tag value must meet the requirements of all services.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Value string `json:"value"`
+}
+
+// AzurePlatformSpec holds the desired state of the Azure infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// Intentionally empty: Azure currently exposes no user-modifiable platform spec fields.
+type AzurePlatformSpec struct{}
+
+// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type AzurePlatformStatus struct {
+ // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName"`
+
+ // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+ // If empty, the value is the same as ResourceGroupName.
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
+
+ // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
+ // with the appropriate Azure API endpoints.
+ // If empty, the value is equal to `AzurePublicCloud`.
+ // +optional
+ CloudName AzureCloudEnvironment `json:"cloudName,omitempty"`
+
+ // armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.
+ // +optional
+ ARMEndpoint string `json:"armEndpoint,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to Azure resources created for the cluster.
+ // See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources.
+ // Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags
+ // may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+ // +listType=atomic
+ // +optional
+ ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AzureResourceTag is a tag to apply to Azure resources created for the cluster.
+type AzureResourceTag struct {
+ // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key
+ // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric
+ // characters and the following special characters `_ . -`.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
+ Key string `json:"key"`
+ // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value
+ // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.
+ // NOTE(review): the pattern below does not spell out that full set; `+-@`
+ // inside the character class is a range (0x2B through 0x40) that happens to
+ // cover most of the documented specials — confirm upstream intent before
+ // changing either the doc or the pattern.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$`
+ Value string `json:"value"`
+}
+
+// AzureCloudEnvironment is the name of the Azure cloud environment.
+// The empty string is also permitted by the enum and means "unset"
+// (treated as AzurePublicCloud per the AzurePlatformStatus.cloudName docs).
+// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud
+type AzureCloudEnvironment string
+
+const (
+ // AzurePublicCloud is the general-purpose, public Azure cloud environment.
+ AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud"
+
+ // AzureUSGovernmentCloud is the Azure cloud environment for the US government.
+ AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud"
+
+ // AzureChinaCloud is the Azure cloud environment used in China.
+ AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud"
+
+ // AzureGermanCloud is the Azure cloud environment used in Germany.
+ AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud"
+
+ // AzureStackCloud is the Azure cloud environment used at the edge and on premises.
+ AzureStackCloud AzureCloudEnvironment = "AzureStackCloud"
+)
+
+// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// Intentionally empty: GCP currently exposes no user-modifiable platform spec fields.
+type GCPPlatformSpec struct{}
+
+// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
+type GCPPlatformStatus struct {
+ // projectID is the Project ID for new GCP resources created for the cluster.
+ ProjectID string `json:"projectID"`
+
+ // region holds the region for new GCP resources created for the cluster.
+ Region string `json:"region"`
+
+ // resourceLabels is a list of additional labels to apply to GCP resources created for the cluster.
+ // See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources.
+ // GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use,
+ // allowing 32 labels for user configuration.
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceLabels are immutable and may only be configured during installation"
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ // +openshift:enable:FeatureGate=GCPLabelsTags
+ ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to GCP resources created for the cluster.
+ // See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on
+ // tagging GCP resources. GCP supports a maximum of 50 tags per resource.
+ // +kubebuilder:validation:MaxItems=50
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ // +openshift:enable:FeatureGate=GCPLabelsTags
+ ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"`
+
+ // This field was introduced and removed under tech preview.
+ // To avoid conflicts with serialisation, this field name may never be used again.
+ // Tombstone the field as a reminder.
+ // ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"`
+
+ // cloudLoadBalancerConfig is a union that contains the IP addresses of API,
+ // API-Int and Ingress Load Balancers created on the cloud platform. These
+ // values would not be populated on on-prem platforms. These Load Balancer
+ // IPs are used to configure the in-cluster DNS instances for API, API-Int
+ // and Ingress services. `dnsType` is expected to be set to `ClusterHosted`
+ // when these Load Balancer IP addresses are populated and used.
+ //
+ // +default={"dnsType": "PlatformDefault"}
+ // +kubebuilder:default={"dnsType": "PlatformDefault"}
+ // +openshift:enable:FeatureGate=GCPClusterHostedDNS
+ // +optional
+ // +nullable
+ CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"`
+}
+
+// GCPResourceLabel is a label to apply to GCP resources created for the cluster.
+type GCPResourceLabel struct {
+ // key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty.
+ // Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters,
+ // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io`
+ // and `openshift-io`.
+ // (The Pattern below also enforces the lowercase first character and the
+ // 63-character cap: one leading letter plus up to 62 further characters.)
+ // +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`"
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$`
+ Key string `json:"key"`
+
+ // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty.
+ // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$`
+ Value string `json:"value"`
+}
+
+// GCPResourceTag is a tag to apply to GCP resources created for the cluster.
+type GCPResourceTag struct {
+ // parentID is the ID of the hierarchical resource where the tags are defined,
+ // e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages:
+ // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id,
+ // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects.
+ // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes.
+ // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers,
+ // and hyphens, and must start with a letter, and cannot end with a hyphen.
+ // (The Pattern alternation below matches either form: the first branch is an
+ // OrganizationID, the second a ProjectID.)
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=32
+ // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)`
+ ParentID string `json:"parentID"`
+
+ // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty.
+ // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
+ // alphanumeric characters, and the following special characters `._-`.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$`
+ Key string `json:"key"`
+
+ // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty.
+ // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
+ // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$`
+ Value string `json:"value"`
+}
+
+// CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS
+// solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's
+// Load Balancer configuration needs to be provided so that the DNS solution hosted
+// within the cluster can be configured with those values.
+// +kubebuilder:validation:XValidation:rule="has(self.dnsType) && self.dnsType != 'ClusterHosted' ? !has(self.clusterHosted) : true",message="clusterHosted is permitted only when dnsType is ClusterHosted"
+// +union
+type CloudLoadBalancerConfig struct {
+ // dnsType indicates the type of DNS solution in use within the cluster. Its default value of
+ // `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform.
+ // It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode,
+ // the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed.
+ // The cluster's use of the cloud's Load Balancers is unaffected by this setting.
+ // The value is immutable after it has been set at install time.
+ // Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS.
+ // Enabling this functionality allows the user to start their own DNS solution outside the cluster after
+ // installation is complete. The customer would be responsible for configuring this custom DNS solution,
+ // and it can be run in addition to the in-cluster DNS solution.
+ // +default="PlatformDefault"
+ // +kubebuilder:default:="PlatformDefault"
+ // +kubebuilder:validation:Enum="ClusterHosted";"PlatformDefault"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="dnsType is immutable"
+ // +optional
+ // +unionDiscriminator
+ DNSType DNSType `json:"dnsType,omitempty"`
+
+ // clusterHosted holds the IP addresses of API, API-Int and Ingress Load
+ // Balancers on Cloud Platforms. The DNS solution hosted within the cluster
+ // use these IP addresses to provide resolution for API, API-Int and Ingress
+ // services.
+ // +optional
+ // +unionMember,optional
+ ClusterHosted *CloudLoadBalancerIPs `json:"clusterHosted,omitempty"`
+}
+
+// CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API,
+// API-Int and Ingress Load balancers. They will be populated as soon as the
+// respective Load Balancers have been configured. These values are utilized
+// to configure the DNS solution hosted within the cluster.
+// Uniqueness of entries in each list is enforced by the +listType=set markers.
+type CloudLoadBalancerIPs struct {
+ // apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service.
+ // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+ // Entries in the apiIntLoadBalancerIPs must be unique.
+ // A maximum of 16 IP addresses are permitted.
+ // +kubebuilder:validation:Format=ip
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +optional
+ APIIntLoadBalancerIPs []IP `json:"apiIntLoadBalancerIPs,omitempty"`
+
+ // apiLoadBalancerIPs holds Load Balancer IPs for the API service.
+ // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+ // Could be empty for private clusters.
+ // Entries in the apiLoadBalancerIPs must be unique.
+ // A maximum of 16 IP addresses are permitted.
+ // +kubebuilder:validation:Format=ip
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +optional
+ APILoadBalancerIPs []IP `json:"apiLoadBalancerIPs,omitempty"`
+
+ // ingressLoadBalancerIPs holds IPs for Ingress Load Balancers.
+ // These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses.
+ // Entries in the ingressLoadBalancerIPs must be unique.
+ // A maximum of 16 IP addresses are permitted.
+ // +kubebuilder:validation:Format=ip
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +optional
+ IngressLoadBalancerIPs []IP `json:"ingressLoadBalancerIPs,omitempty"`
+}
+
+// BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.
+// +union
+type BareMetalPlatformLoadBalancer struct {
+ // type defines the type of load balancer used by the cluster on BareMetal platform
+ // which can be a user-managed or openshift-managed load balancer
+ // that is to be used for the OpenShift API and Ingress endpoints.
+ // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+ // defined in the machine config operator will be deployed.
+ // When set to UserManaged these static pods will not be deployed and it is expected that
+ // the load balancer is configured out of band by the deployer.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default.
+ // The default value is OpenShiftManagedDefault.
+ // (The XValidation rule below allows the field to transition once from empty
+ // to a value, after which it is immutable.)
+ // +default="OpenShiftManagedDefault"
+ // +kubebuilder:default:="OpenShiftManagedDefault"
+ // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+ // +optional
+ // +unionDiscriminator
+ Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set"
+type BareMetalPlatformSpec struct {
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.apiServerInternalIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.ingressIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ IngressIPs []IP `json:"ingressIPs"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster
+ // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6,
+ // for example "10.0.0.0/8" or "fd00::/8".
+ // (The XValidation rule below enforces that entries are unique.)
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
+// For more information about the network architecture used with the BareMetal platform type, see:
+// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
+type BareMetalPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ //
+ // Deprecated: Use APIServerInternalIPs instead.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers. In dual
+ // stack clusters this list contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ //
+ // Deprecated: Use IngressIPs instead.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names. In dual stack clusters this list
+ // contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ IngressIPs []string `json:"ingressIPs"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // BareMetal deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+ // loadBalancer defines how the load balancer used by the cluster is configured.
+ // +default={"type": "OpenShiftManagedDefault"}
+ // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+ // +openshift:enable:FeatureGate=BareMetalLoadBalancer
+ // +optional
+ LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
+ // (The XValidation rule below enforces that entries are unique.)
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.
+// +union
+type OpenStackPlatformLoadBalancer struct {
+ // type defines the type of load balancer used by the cluster on OpenStack platform
+ // which can be a user-managed or openshift-managed load balancer
+ // that is to be used for the OpenShift API and Ingress endpoints.
+ // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+ // defined in the machine config operator will be deployed.
+ // When set to UserManaged these static pods will not be deployed and it is expected that
+ // the load balancer is configured out of band by the deployer.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default.
+ // The default value is OpenShiftManagedDefault.
+ // (The XValidation rule below allows the field to transition once from empty
+ // to a value, after which it is immutable.)
+ // +default="OpenShiftManagedDefault"
+ // +kubebuilder:default:="OpenShiftManagedDefault"
+ // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+ // +optional
+ // +unionDiscriminator
+ Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set"
+type OpenStackPlatformSpec struct {
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.apiServerInternalIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.ingressIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ IngressIPs []IP `json:"ingressIPs"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster
+ // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6,
+ // for example "10.0.0.0/8" or "fd00::/8".
+ // (The XValidation rule below enforces that entries are unique.)
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.
+type OpenStackPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ //
+ // Deprecated: Use APIServerInternalIPs instead.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers. In dual
+ // stack clusters this list contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+ // cloudName is the name of the desired OpenStack cloud in the
+ // client configuration file (`clouds.yaml`).
+ CloudName string `json:"cloudName,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ //
+ // Deprecated: Use IngressIPs instead.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names. In dual stack clusters this list
+ // contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ IngressIPs []string `json:"ingressIPs"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // OpenStack deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+ // loadBalancer defines how the load balancer used by the cluster is configured.
+ // +default={"type": "OpenShiftManagedDefault"}
+ // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+ // +optional
+ LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
+ // (The XValidation rule below enforces that entries are unique.)
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.
+// +union
+type OvirtPlatformLoadBalancer struct {
+ // type defines the type of load balancer used by the cluster on Ovirt platform
+ // which can be a user-managed or openshift-managed load balancer
+ // that is to be used for the OpenShift API and Ingress endpoints.
+ // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+ // defined in the machine config operator will be deployed.
+ // When set to UserManaged these static pods will not be deployed and it is expected that
+ // the load balancer is configured out of band by the deployer.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default.
+ // The default value is OpenShiftManagedDefault.
+ // +default="OpenShiftManagedDefault"
+ // +kubebuilder:default:="OpenShiftManagedDefault"
+ // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+ // +optional
+ // +unionDiscriminator
+ Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type OvirtPlatformSpec struct{}
+
+// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.
+type OvirtPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ //
+ // Deprecated: Use APIServerInternalIPs instead.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers. In dual
+ // stack clusters this list contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=set
+ APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ //
+ // Deprecated: Use IngressIPs instead.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names. In dual stack clusters this list
+ // contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=set
+ IngressIPs []string `json:"ingressIPs"`
+
+ // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+ // loadBalancer defines how the load balancer used by the cluster is configured.
+ // +default={"type": "OpenShiftManagedDefault"}
+ // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+ // +openshift:enable:FeatureGate=BareMetalLoadBalancer
+ // +optional
+ LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.
+// +union
+type VSpherePlatformLoadBalancer struct {
+ // type defines the type of load balancer used by the cluster on VSphere platform
+ // which can be a user-managed or openshift-managed load balancer
+ // that is to be used for the OpenShift API and Ingress endpoints.
+ // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+ // defined in the machine config operator will be deployed.
+ // When set to UserManaged these static pods will not be deployed and it is expected that
+ // the load balancer is configured out of band by the deployer.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default.
+ // The default value is OpenShiftManagedDefault.
+ // +default="OpenShiftManagedDefault"
+ // +kubebuilder:default:="OpenShiftManagedDefault"
+ // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+ // +optional
+ // +unionDiscriminator
+ Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and
+// the vCenter topology of that failure domain.
+type VSpherePlatformFailureDomainSpec struct {
+ // name defines the arbitrary but unique name
+ // of a failure domain.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ Name string `json:"name"`
+
+ // region defines the name of a region tag that will
+ // be attached to a vCenter datacenter. The tag
+ // category in vCenter must be named openshift-region.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +kubebuilder:validation:Required
+ Region string `json:"region"`
+
+ // zone defines the name of a zone tag that will
+ // be attached to a vCenter cluster. The tag
+ // category in vCenter must be named openshift-zone.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=80
+ // +kubebuilder:validation:Required
+ Zone string `json:"zone"`
+
+ // server is the fully-qualified domain name or the IP address of the vCenter server.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=255
+ // ---
+ // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
+ Server string `json:"server"`
+
+ // Topology describes a given failure domain using vSphere constructs
+ // +kubebuilder:validation:Required
+ Topology VSpherePlatformTopology `json:"topology"`
+}
+
+// VSpherePlatformTopology holds the required and optional vCenter objects - datacenter,
+// computeCluster, networks, datastore and resourcePool - to provision virtual machines.
+type VSpherePlatformTopology struct {
+ // datacenter is the name of vCenter datacenter in which virtual machines will be located.
+ // The maximum length of the datacenter name is 80 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=80
+ Datacenter string `json:"datacenter"`
+
+ // computeCluster the absolute path of the vCenter cluster
+ // in which virtual machine will be located.
+ // The absolute path is of the form /<datacenter>/host/<cluster>.
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/host/.*?`
+ ComputeCluster string `json:"computeCluster"`
+
+ // networks is the list of port group network names within this failure domain.
+ // Currently, we only support a single interface per RHCOS virtual machine.
+ // The available networks (port groups) can be listed using
+ // `govc ls 'network/*'`
+ // The single interface should be the absolute path of the form
+ // /<datacenter>/network/<portgroup>.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxItems=1
+ // +kubebuilder:validation:MinItems=1
+ // +listType=atomic
+ Networks []string `json:"networks"`
+
+ // datastore is the absolute path of the datastore in which the
+ // virtual machine is located.
+ // The absolute path is of the form /<datacenter>/datastore/<datastore>
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?`
+ Datastore string `json:"datastore"`
+
+ // resourcePool is the absolute path of the resource pool where virtual machines will be
+ // created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>.
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/host/.*?/Resources.*`
+ // +optional
+ ResourcePool string `json:"resourcePool,omitempty"`
+
+ // folder is the absolute path of the folder where
+ // virtual machines are located. The absolute path
+ // is of the form /<datacenter>/vm/<folder>.
+ // The maximum length of the path is 2048 characters.
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+ // +optional
+ Folder string `json:"folder,omitempty"`
+
+ // template is the full inventory path of the virtual machine or template
+ // that will be cloned when creating new machines in this failure domain.
+ // The maximum length of the path is 2048 characters.
+ //
+ // When omitted, the template will be calculated by the control plane
+ // machineset operator based on the region and zone defined in
+ // VSpherePlatformFailureDomainSpec.
+ // For example, for zone=zonea, region=region1, and infrastructure name=test,
+ // the template path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea.
+ // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?`
+ // +optional
+ Template string `json:"template,omitempty"`
+}
+
+// VSpherePlatformVCenterSpec stores the vCenter connection fields.
+// This is used by the vSphere CCM.
+type VSpherePlatformVCenterSpec struct {
+
+ // server is the fully-qualified domain name or the IP address of the vCenter server.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=255
+ // ---
+ // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname
+ Server string `json:"server"`
+
+ // port is the TCP port that will be used to communicate to
+ // the vCenter endpoint.
+ // When omitted, this means the user has no opinion and
+ // it is up to the platform to choose a sensible default,
+ // which is subject to change over time.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32767
+ // +optional
+ Port int32 `json:"port,omitempty"`
+
+ // The vCenter Datacenters in which the RHCOS
+ // vm guests are located. This field will
+ // be used by the Cloud Controller Manager.
+ // Each datacenter listed here should be used within
+ // a topology.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +listType=set
+ Datacenters []string `json:"datacenters"`
+}
+
+// VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for
+// including and excluding IP ranges in the cloud provider.
+// This would be used for example when multiple network adapters are attached to
+// a guest to help determine which IP address the cloud config manager should use
+// for the external and internal node networking.
+type VSpherePlatformNodeNetworkingSpec struct {
+ // networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs
+ // that will be used in respective status.addresses fields.
+ // ---
+ // + Validation is applied via a patch, we validate the format as cidr
+ // +listType=set
+ // +optional
+ NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"`
+
+ // network VirtualMachine's VM Network names that will be used to when searching
+ // for status.addresses fields. Note that if internal.networkSubnetCIDR and
+ // external.networkSubnetCIDR are not set, then the vNIC associated to this network must
+ // only have a single IP address assigned to it.
+ // The available networks (port groups) can be listed using
+ // `govc ls 'network/*'`
+ // +optional
+ Network string `json:"network,omitempty"`
+
+ // excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting
+ // the IP address from the VirtualMachine's VM for use in the status.addresses fields.
+ // ---
+ // + Validation is applied via a patch, we validate the format as cidr
+ // +listType=atomic
+ // +optional
+ ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"`
+}
+
+// VSpherePlatformNodeNetworking holds the external and internal node networking spec.
+type VSpherePlatformNodeNetworking struct {
+ // external represents the network configuration of the node that is externally routable.
+ // +optional
+ External VSpherePlatformNodeNetworkingSpec `json:"external"`
+ // internal represents the network configuration of the node that is routable only within the cluster.
+ // +optional
+ Internal VSpherePlatformNodeNetworkingSpec `json:"internal"`
+}
+
+// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider.
+// In the future the cloud provider operator, storage operator and machine operator will
+// use these fields for configuration.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)",message="apiServerInternalIPs list is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.ingressIPs) || has(self.ingressIPs)",message="ingressIPs list is required once set"
+type VSpherePlatformSpec struct {
+ // vcenters holds the connection details for services to communicate with vCenter.
+ // Currently, only a single vCenter is supported.
+ // ---
+ // + If VCenters is not defined use the existing cloud-config configmap defined
+ // + in openshift-config.
+ // +kubebuilder:validation:MinItems=0
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1
+ // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3
+ // +listType=atomic
+ // +optional
+ VCenters []VSpherePlatformVCenterSpec `json:"vcenters,omitempty"`
+
+ // failureDomains contains the definition of region, zone and the vCenter topology.
+ // If this is omitted failure domains (regions and zones) will not be used.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ FailureDomains []VSpherePlatformFailureDomainSpec `json:"failureDomains,omitempty"`
+
+ // nodeNetworking contains the definition of internal and external network constraints for
+ // assigning the node's networking.
+ // If this field is omitted, networking defaults to the legacy
+ // address selection behavior which is to only support a single address and
+ // return the first one found.
+ // +optional
+ NodeNetworking VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.apiServerInternalIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ APIServerInternalIPs []IP `json:"apiServerInternalIPs"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names.
+ // In dual stack clusters this list contains two IP addresses, one from IPv4
+ // family and one from IPv6.
+ // In single stack clusters a single IP address is expected.
+ // When omitted, values from the status.ingressIPs will be used.
+ // Once set, the list cannot be completely removed (but its second entry can).
+ //
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ // +optional
+ IngressIPs []IP `json:"ingressIPs"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster
+ // nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6,
+ // for example "10.0.0.0/8" or "fd00::/8".
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.
+type VSpherePlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ //
+ // Deprecated: Use APIServerInternalIPs instead.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers. In dual
+ // stack clusters this list contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ //
+ // Deprecated: Use IngressIPs instead.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names. In dual stack clusters this list
+ // contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=atomic
+ IngressIPs []string `json:"ingressIPs"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // vSphere deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+ // loadBalancer defines how the load balancer used by the cluster is configured.
+ // +default={"type": "OpenShiftManagedDefault"}
+ // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+ // +openshift:enable:FeatureGate=BareMetalLoadBalancer
+ // +optional
+ LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"`
+
+ // machineNetworks are IP networks used to connect all the OpenShift cluster nodes.
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=32
+ // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))"
+ // +optional
+ MachineNetworks []CIDR `json:"machineNetworks"`
+}
+
+// IBMCloudServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of IBM Cloud Services.
+type IBMCloudServiceEndpoint struct {
+ // name is the name of the IBM Cloud service.
+ // Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC.
+ // For example, the IBM Cloud Private IAM service could be configured with the
+ // service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`
+ // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured
+ // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`
+ //
+ // +kubebuilder:validation:Required
+ Name IBMCloudServiceName `json:"name"`
+
+ // url is fully qualified URI with scheme https, that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL"
+ URL string `json:"url"`
+}
+
+// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type IBMCloudPlatformSpec struct{}
+
+// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.
+type IBMCloudPlatformStatus struct {
+ // Location is where the cluster has been deployed
+ Location string `json:"location,omitempty"`
+
+ // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName,omitempty"`
+
+ // ProviderType indicates the type of cluster that was created
+ ProviderType IBMCloudProviderType `json:"providerType,omitempty"`
+
+ // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
+ // the DNS zone for the cluster's base domain
+ CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
+
+ // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+ // for the cluster's base domain
+ DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
+
+ // serviceEndpoints is a list of custom endpoints which will override the default
+ // service endpoints of an IBM Cloud service. These endpoints are consumed by
+ // components within the cluster to reach the respective IBM Cloud Services.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ ServiceEndpoints []IBMCloudServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type KubevirtPlatformSpec struct{}
+
+// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.
+type KubevirtPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type EquinixMetalPlatformSpec struct{}
+
+// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.
+type EquinixMetalPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// PowervsServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of PowerVS Services.
+type PowerVSServiceEndpoint struct {
+ // name is the name of the Power VS service.
+ // Few of the services are
+ // IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api
+ // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller
+ // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ Name string `json:"name"`
+
+ // url is fully qualified URI with scheme https, that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Format=uri
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type PowerVSPlatformSpec struct {
+ // serviceEndpoints is a list of custom endpoints which will override the default
+ // service endpoints of a Power VS service.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+ // PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceGroup) || has(self.resourceGroup)",message="cannot unset resourceGroup once set"
+type PowerVSPlatformStatus struct {
+ // region holds the default Power VS region for new Power VS resources created by the cluster.
+ Region string `json:"region"`
+
+ // zone holds the default zone for the new Power VS resources created by the cluster.
+ // Note: Currently only single-zone OCP clusters are supported
+ Zone string `json:"zone"`
+
+ // resourceGroup is the resource group name for new IBMCloud resources created for a cluster.
+ // The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry.
+ // More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs.
+ // When omitted, the image registry operator won't be able to configure storage,
+ // which results in the image registry cluster operator not being in an available state.
+ //
+ // +kubebuilder:validation:Pattern=^[a-zA-Z0-9-_ ]+$
+ // +kubebuilder:validation:MaxLength=40
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="resourceGroup is immutable once set"
+ // +optional
+ ResourceGroup string `json:"resourceGroup"`
+
+ // serviceEndpoints is a list of custom endpoints which will override the default
+ // service endpoints of a Power VS service.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing
+ // the DNS zone for the cluster's base domain
+ CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
+
+ // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone
+ // for the cluster's base domain
+ DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"`
+}
+
+// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AlibabaCloudPlatformSpec struct{}
+
+// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.
+type AlibabaCloudPlatformStatus struct {
+ // region specifies the region for Alibaba Cloud resources created for the cluster.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$`
+ // +required
+ Region string `json:"region"`
+ // resourceGroupID is the ID of the resource group for the cluster.
+ // +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$`
+ // +optional
+ ResourceGroupID string `json:"resourceGroupID,omitempty"`
+ // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.
+ // +kubebuilder:validation:MaxItems=20
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AlibabaCloudResourceTag is the set of tags to add to apply to resources.
+type AlibabaCloudResourceTag struct {
+ // key is the key of the tag.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +required
+ Key string `json:"key"`
+ // value is the value of the tag.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +required
+ Value string `json:"value"`
+}
+
+// NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.
+// +union
+type NutanixPlatformLoadBalancer struct {
+ // type defines the type of load balancer used by the cluster on Nutanix platform
+ // which can be a user-managed or openshift-managed load balancer
+ // that is to be used for the OpenShift API and Ingress endpoints.
+ // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+ // defined in the machine config operator will be deployed.
+ // When set to UserManaged these static pods will not be deployed and it is expected that
+ // the load balancer is configured out of band by the deployer.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default.
+ // The default value is OpenShiftManagedDefault.
+ // +default="OpenShiftManagedDefault"
+ // +kubebuilder:default:="OpenShiftManagedDefault"
+ // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+ // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+ // +optional
+ // +unionDiscriminator
+ Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type NutanixPlatformSpec struct {
+ // prismCentral holds the endpoint address and port to access the Nutanix Prism Central.
+ // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
+ // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
+ // proxy spec.noProxy list.
+ // +kubebuilder:validation:Required
+ PrismCentral NutanixPrismEndpoint `json:"prismCentral"`
+
+ // prismElements holds one or more endpoint address and port data to access the Nutanix
+ // Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one
+ // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.)
+ // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.)
+ // spread over multiple Prism Elements (clusters) of the Prism Central.
+ // +kubebuilder:validation:Required
+ // +listType=map
+ // +listMapKey=name
+ PrismElements []NutanixPrismElementEndpoint `json:"prismElements"`
+
+ // failureDomains configures failure domains information for the Nutanix platform.
+ // When set, the failure domains defined here may be used to spread Machines across
+ // prism element clusters to improve fault tolerance of the cluster.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ FailureDomains []NutanixFailureDomain `json:"failureDomains"`
+}
+
+// NutanixFailureDomain configures failure domain information for the Nutanix platform.
+type NutanixFailureDomain struct {
+ // name defines the unique name of a failure domain.
+ // Name is required and must be at most 64 characters in length.
+ // It must consist of only lower case alphanumeric characters and hyphens (-).
+ // It must start and end with an alphanumeric character.
+ // This value is arbitrary and is used to identify the failure domain within the platform.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?`
+ Name string `json:"name"`
+
+ // cluster is to identify the cluster (the Prism Element under management of the Prism Central),
+ // in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained
+ // from the Prism Central console or using the prism_central API.
+ // +kubebuilder:validation:Required
+ Cluster NutanixResourceIdentifier `json:"cluster"`
+
+ // subnets holds a list of identifiers (one or more) of the cluster's network subnets
+ // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
+ // obtained from the Prism Central console or using the prism_central API.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=1
+ // +listType=map
+ // +listMapKey=type
+ Subnets []NutanixResourceIdentifier `json:"subnets"`
+}
+
+// NutanixIdentifierType is an enumeration of different resource identifier types.
+// +kubebuilder:validation:Enum:=UUID;Name
+type NutanixIdentifierType string
+
+const (
+ // NutanixIdentifierUUID is a resource identifier identifying the object by UUID.
+ NutanixIdentifierUUID NutanixIdentifierType = "UUID"
+
+ // NutanixIdentifierName is a resource identifier identifying the object by Name.
+ NutanixIdentifierName NutanixIdentifierType = "Name"
+)
+
+// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'UUID' ? has(self.uuid) : !has(self.uuid)",message="uuid configuration is required when type is UUID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Name' ? has(self.name) : !has(self.name)",message="name configuration is required when type is Name, and forbidden otherwise"
+// +union
+type NutanixResourceIdentifier struct {
+ // type is the identifier type to use for this resource.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Type NutanixIdentifierType `json:"type"`
+
+ // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.
+ // +optional
+ UUID *string `json:"uuid,omitempty"`
+
+ // name is the resource name in the PC. It cannot be empty if the type is Name.
+ // +optional
+ Name *string `json:"name,omitempty"`
+}
+
+// NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)
+type NutanixPrismEndpoint struct {
+ // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=256
+ Address string `json:"address"`
+
+ // port is the port number to access the Nutanix Prism Central or Element (cluster)
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ Port int32 `json:"port"`
+}
+
+// NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)
+type NutanixPrismElementEndpoint struct {
+ // name is the name of the Prism Element (cluster). This value will correspond with
+ // the cluster field configured on other resources (eg Machines, PVCs, etc).
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=256
+ Name string `json:"name"`
+
+ // endpoint holds the endpoint address and port data of the Prism Element (cluster).
+ // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
+ // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the
+ // proxy spec.noProxy list.
+ // +kubebuilder:validation:Required
+ Endpoint NutanixPrismEndpoint `json:"endpoint"`
+}
+
+// NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.
+type NutanixPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ //
+ // Deprecated: Use APIServerInternalIPs instead.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // apiServerInternalIPs are the IP addresses to contact the Kubernetes API
+ // server that can be used by components inside the cluster, like kubelets
+ // using the infrastructure rather than Kubernetes networking. These are the
+ // IPs for a self-hosted load balancer in front of the API servers. In dual
+ // stack clusters this list contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=set
+ APIServerInternalIPs []string `json:"apiServerInternalIPs"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ //
+ // Deprecated: Use IngressIPs instead.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // ingressIPs are the external IPs which route to the default ingress
+ // controller. The IPs are suitable targets of a wildcard DNS record used to
+ // resolve default route host names. In dual stack clusters this list
+ // contains two IPs otherwise only one.
+ //
+ // +kubebuilder:validation:Format=ip
+ // +kubebuilder:validation:MaxItems=2
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf || (size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true)",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address"
+ // +listType=set
+ IngressIPs []string `json:"ingressIPs"`
+
+ // loadBalancer defines how the load balancer used by the cluster is configured.
+ // +default={"type": "OpenShiftManagedDefault"}
+ // +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+ // +openshift:enable:FeatureGate=BareMetalLoadBalancer
+ // +optional
+ LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InfrastructureList is
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InfrastructureList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Infrastructure `json:"items"`
+}
+
+// IP is an IP address (for example, "10.0.0.0" or "fd00::").
+// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address"
+// +kubebuilder:validation:MaxLength:=39
+// +kubebuilder:validation:MinLength:=1
+type IP string
+
+// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8").
+// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address"
+// +kubebuilder:validation:MaxLength:=43
+// +kubebuilder:validation:MinLength:=1
+type CIDR string
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
new file mode 100644
index 0000000000..e58ad7f00b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -0,0 +1,339 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress holds cluster-wide information about ingress, including the default ingress domain
+// used for routes. The canonical name is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=ingresses,scope=Cluster
+// +kubebuilder:subresource:status
+type Ingress struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec IngressSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status IngressStatus `json:"status"`
+}
+
+type IngressSpec struct {
+ // domain is used to generate a default host name for a route when the
+ // route's host name is empty. The generated host name will follow this
+	// pattern: "<route-name>.<route-namespace>.<domain>".
+ //
+ // It is also used as the default wildcard domain suffix for ingress. The
+	// default ingresscontroller domain will follow this pattern: "*.<domain>".
+ //
+ // Once set, changing domain is not currently supported.
+ Domain string `json:"domain"`
+
+ // appsDomain is an optional domain to use instead of the one specified
+ // in the domain field when a Route is created without specifying an explicit
+ // host. If appsDomain is nonempty, this value is used to generate default
+ // host values for Route. Unlike domain, appsDomain may be modified after
+ // installation.
+ // This assumes a new ingresscontroller has been setup with a wildcard
+ // certificate.
+ // +optional
+ AppsDomain string `json:"appsDomain,omitempty"`
+
+ // componentRoutes is an optional list of routes that are managed by OpenShift components
+ // that a cluster-admin is able to configure the hostname and serving certificate for.
+ // The namespace and name of each route in this list should match an existing entry in the
+ // status.componentRoutes list.
+ //
+ // To determine the set of configurable Routes, look at namespace and name of entries in the
+ // .status.componentRoutes list, where participating operators write the status of
+ // configurable routes.
+ // +optional
+ // +listType=map
+ // +listMapKey=namespace
+ // +listMapKey=name
+ ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"`
+
+ // requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes
+ // matching the domainPattern/s and namespaceSelector/s that are specified in the policy.
+ // Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route
+ // annotation, and affect route admission.
+ //
+ // A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation:
+ // "haproxy.router.openshift.io/hsts_header"
+ // E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains
+ //
+ // - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector,
+ // then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route
+ // is rejected.
+ // - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies
+ // determines the route's admission status.
+ // - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector,
+ // then it may use any HSTS Policy annotation.
+ //
+ // The HSTS policy configuration may be changed after routes have already been created. An update to a previously
+ // admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration.
+ // However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.
+ //
+ // Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.
+ // +optional
+ RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"`
+
+ // loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure
+ // provider of the current cluster and are required for Ingress Controller to work on OpenShift.
+ // +optional
+ LoadBalancer LoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at spec-level for the underlying cluster, it
+// is supposed that only one of the spec structs is set.
+// +union
+type IngressPlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster.
+ // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS",
+ // "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms,
+ // and must handle unrecognized platforms as None if they do not support that platform.
+ //
+ // +unionDiscriminator
+ Type PlatformType `json:"type"`
+
+ // aws contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSIngressSpec `json:"aws,omitempty"`
+}
+
+type LoadBalancer struct {
+ // platform holds configuration specific to the underlying
+ // infrastructure provider for the ingress load balancers.
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // +optional
+ Platform IngressPlatformSpec `json:"platform,omitempty"`
+}
+
+// AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+// +union
+type AWSIngressSpec struct {
+ // type allows user to set a load balancer type.
+ // When this field is set the default ingresscontroller will get created using the specified LBType.
+ // If this field is not set then the default ingress controller of LBType Classic will be created.
+ // Valid values are:
+ //
+ // * "Classic": A Classic Load Balancer that makes routing decisions at either
+ // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See
+ // the following for additional details:
+ //
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb
+ //
+ // * "NLB": A Network Load Balancer that makes routing decisions at the
+ // transport layer (TCP/SSL). See the following for additional details:
+ //
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:=NLB;Classic
+ // +kubebuilder:validation:Required
+ Type AWSLBType `json:"type,omitempty"`
+}
+
+type AWSLBType string
+
+const (
+ // NLB is the Network Load Balancer Type of AWS. Using NLB one can set NLB load balancer type for the default ingress controller.
+ NLB AWSLBType = "NLB"
+
+	// Classic is the Classic Load Balancer Type of AWS. Using Classic one can set Classic load balancer type for the default ingress controller.
+ Classic AWSLBType = "Classic"
+)
+
+// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported.
+// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+// +kubebuilder:validation:MinLength=1
+// +kubebuilder:validation:MaxLength=512
+type ConsumingUser string
+
+// Hostname is a host name as defined by RFC-1123.
+// + ---
+// + The left operand of the | is the original kubebuilder hostname validation format, which is incorrect because it
+// + allows upper case letters, disallows hyphen or number in the TLD, and allows labels to start/end in non-alphanumeric
+// + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256.
+// + ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$
+// +
+// + The right operand of the | is a new pattern that mimics the current API route admission validation on hostname,
+// + except that it allows hostnames longer than the maximum length:
+// + ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$
+// +
+// + Both operand patterns are made available so that modifications on ingress spec can still happen after an invalid hostname
+// + was saved via validation by the incorrect left operand of the | operator.
+// +
+// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$`
+type Hostname string
+
+type IngressStatus struct {
+ // componentRoutes is where participating operators place the current route status for routes whose
+ // hostnames and serving certificates can be customized by the cluster-admin.
+ // +optional
+ // +listType=map
+ // +listMapKey=namespace
+ // +listMapKey=name
+ ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"`
+
+ // defaultPlacement is set at installation time to control which
+ // nodes will host the ingress router pods by default. The options are
+ // control-plane nodes or worker nodes.
+ //
+ // This field works by dictating how the Cluster Ingress Operator will
+ // consider unset replicas and nodePlacement fields in IngressController
+ // resources when creating the corresponding Deployments.
+ //
+ // See the documentation for the IngressController replicas and nodePlacement
+ // fields for more information.
+ //
+ // When omitted, the default value is Workers
+ //
+ // +kubebuilder:validation:Enum:="ControlPlane";"Workers";""
+ // +optional
+ DefaultPlacement DefaultPlacement `json:"defaultPlacement"`
+}
+
+// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.
+type ComponentRouteSpec struct {
+ // namespace is the namespace of the route to customize.
+ //
+ // The namespace and name of this componentRoute must match a corresponding
+ // entry in the list of status.componentRoutes if the route is to be customized.
+ // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Required
+ // +required
+ Namespace string `json:"namespace"`
+
+ // name is the logical name of the route to customize.
+ //
+ // The namespace and name of this componentRoute must match a corresponding
+ // entry in the list of status.componentRoutes if the route is to be customized.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+
+ // hostname is the hostname that should be used by the route.
+ // +kubebuilder:validation:Required
+ // +required
+ Hostname Hostname `json:"hostname"`
+
+ // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace.
+ // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name.
+ // If the custom hostname uses the default routing suffix of the cluster,
+ // the Secret specification for a serving certificate will not be needed.
+ // +optional
+ ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"`
+}
+
+// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.
+type ComponentRouteStatus struct {
+ // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace
+ // ensures that no two components will conflict and the same component can be installed multiple times.
+ //
+ // The namespace and name of this componentRoute must match a corresponding
+ // entry in the list of spec.componentRoutes if the route is to be customized.
+ // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Required
+ // +required
+ Namespace string `json:"namespace"`
+
+ // name is the logical name of the route to customize. It does not have to be the actual name of a route resource
+ // but it cannot be renamed.
+ //
+ // The namespace and name of this componentRoute must match a corresponding
+ // entry in the list of spec.componentRoutes if the route is to be customized.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Required
+ // +required
+ Name string `json:"name"`
+
+ // defaultHostname is the hostname of this route prior to customization.
+ // +kubebuilder:validation:Required
+ // +required
+ DefaultHostname Hostname `json:"defaultHostname"`
+
+ // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.
+ // +kubebuilder:validation:MaxItems=5
+ // +optional
+ ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"`
+
+ // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single
+ // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.
+ // +kubebuilder:validation:MinItems=1
+ // +optional
+ CurrentHostnames []Hostname `json:"currentHostnames,omitempty"`
+
+ // conditions are used to communicate the state of the componentRoutes entry.
+ //
+ // Supported conditions include Available, Degraded and Progressing.
+ //
+ // If available is true, the content served by the route can be accessed by users. This includes cases
+ // where a default may continue to serve content while the customized route specified by the cluster-admin
+ // is being configured.
+ //
+ // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry.
+ // The currentHostnames field may or may not be in effect.
+ //
+ // If Progressing is true, that means the component is taking some action related to the componentRoutes entry.
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:Required
+ // +required
+ RelatedObjects []ObjectReference `json:"relatedObjects"`
+}
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=1
+type IngressList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Ingress `json:"items"`
+}
+
+// DefaultPlacement defines the default placement of ingress router pods.
+type DefaultPlacement string
+
+const (
+ // "Workers" is for having router pods placed on worker nodes by default.
+ DefaultPlacementWorkers DefaultPlacement = "Workers"
+
+ // "ControlPlane" is for having router pods placed on control-plane nodes by default.
+ DefaultPlacementControlPlane DefaultPlacement = "ControlPlane"
+)
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
new file mode 100644
index 0000000000..6656849a7b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -0,0 +1,308 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc.
+// Please view network.spec for an explanation on what applies when configuring this resource.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:compatibility-gen:level=1
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=networks,scope=Cluster
+type Network struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration.
+ // As a general rule, this SHOULD NOT be read directly. Instead, you should
+ // consume the NetworkStatus, as it indicates the currently deployed configuration.
+ // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec NetworkSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status NetworkStatus `json:"status"`
+}
+
+// NetworkSpec is the desired network configuration.
+// As a general rule, this SHOULD NOT be read directly. Instead, you should
+// consume the NetworkStatus, as it indicates the currently deployed configuration.
+// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkDiagnosticsConfig,rule="!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) || self.networkDiagnostics.mode!='Disabled' || !has(self.networkDiagnostics.sourcePlacement) && !has(self.networkDiagnostics.targetPlacement)",message="cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement when networkDiagnostics.mode is Disabled"
+type NetworkSpec struct {
+ // IP address pool to use for pod IPs.
+ // This field is immutable after installation.
+ // +listType=atomic
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // IP address pool for services.
+ // Currently, we only support a single entry here.
+ // This field is immutable after installation.
+ // +listType=atomic
+ ServiceNetwork []string `json:"serviceNetwork"`
+
+ // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN).
+ // This should match a value that the cluster-network-operator understands,
+ // or else no networking will be installed.
+ // Currently supported values are:
+ // - OpenShiftSDN
+ // This field is immutable after installation.
+ NetworkType string `json:"networkType"`
+
+ // externalIP defines configuration for controllers that
+ // affect Service.ExternalIP. If nil, then ExternalIP is
+ // not allowed to be set.
+ // +optional
+ ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
+
+ // The port range allowed for Services of type NodePort.
+ // If not specified, the default of 30000-32767 will be used.
+ // Such Services without a NodePort specified will have one
+ // automatically allocated from this range.
+ // This parameter can be updated after the cluster is
+ // installed.
+ // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+ ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"`
+
+ // networkDiagnostics defines network diagnostics configuration.
+ //
+ // Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io.
+ // If networkDiagnostics is not specified or is empty,
+ // and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true,
+ // the network diagnostics feature will be disabled.
+ //
+ // +optional
+ // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
+ NetworkDiagnostics NetworkDiagnostics `json:"networkDiagnostics"`
+}
+
+// NetworkStatus is the current network configuration.
+type NetworkStatus struct {
+ // IP address pool to use for pod IPs.
+ // +listType=atomic
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"`
+
+ // IP address pool for services.
+ // Currently, we only support a single entry here.
+ // +listType=atomic
+ ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+
+ // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).
+ NetworkType string `json:"networkType,omitempty"`
+
+ // ClusterNetworkMTU is the MTU for inter-pod networking.
+ ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"`
+
+ // Migration contains the cluster network migration configuration.
+ Migration *NetworkMigration `json:"migration,omitempty"`
+
+ // conditions represents the observations of a network.config current state.
+ // Known .status.conditions.type are: "NetworkTypeMigrationInProgress", "NetworkTypeMigrationMTUReady",
+ // "NetworkTypeMigrationTargetCNIAvailable", "NetworkTypeMigrationTargetCNIInUse",
+ // "NetworkTypeMigrationOriginalCNIPurged" and "NetworkDiagnosticsAvailable"
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +openshift:enable:FeatureGate=NetworkLiveMigration
+ // +openshift:enable:FeatureGate=NetworkDiagnosticsConfig
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs
+// are allocated.
+type ClusterNetworkEntry struct {
+ // The complete block for pod IPs.
+ CIDR string `json:"cidr"`
+
+ // The size (prefix) of block to allocate to each node. If this
+ // field is not used by the plugin, it can be left unset.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
+// of a Service resource.
+type ExternalIPConfig struct {
+ // policy is a set of restrictions applied to the ExternalIP field.
+ // If nil or empty, then ExternalIP is not allowed to be set.
+ // +optional
+ Policy *ExternalIPPolicy `json:"policy,omitempty"`
+
+ // autoAssignCIDRs is a list of CIDRs from which to automatically assign
+ // Service.ExternalIP. These are assigned when the service is of type
+ // LoadBalancer. In general, this is only useful for bare-metal clusters.
+ // In Openshift 3.x, this was misleadingly called "IngressIPs".
+ // Automatically assigned External IPs are not affected by any
+ // ExternalIPPolicy rules.
+ // Currently, only one entry may be provided.
+ // +optional
+ // +listType=atomic
+ AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+ // allowedCIDRs is the list of allowed CIDRs.
+ // +listType=atomic
+ AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+ // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+ // over allowedCIDRs.
+ // +optional
+ // +listType=atomic
+ RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Network `json:"items"`
+}
+
+// NetworkMigration represents the cluster network configuration.
+type NetworkMigration struct {
+ // NetworkType is the target plugin that is to be deployed.
+ // Currently supported values are: OpenShiftSDN, OVNKubernetes
+ // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"}
+ // +optional
+ NetworkType string `json:"networkType,omitempty"`
+
+ // MTU contains the MTU migration configuration.
+ // +optional
+ MTU *MTUMigration `json:"mtu,omitempty"`
+}
+
+// MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+ // Network contains MTU migration configuration for the default network.
+ // +optional
+ Network *MTUMigrationValues `json:"network,omitempty"`
+
+ // Machine contains MTU migration configuration for the machine's uplink.
+ // +optional
+ Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for a MTU migration.
+type MTUMigrationValues struct {
+ // To is the MTU to migrate to.
+ // +kubebuilder:validation:Minimum=0
+ To *uint32 `json:"to"`
+
+ // From is the MTU to migrate from.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ From *uint32 `json:"from,omitempty"`
+}
+
+// NetworkDiagnosticsMode is an enumeration of the available network diagnostics modes
+// Valid values are "", "All", "Disabled".
+// +kubebuilder:validation:Enum:="";All;Disabled
+type NetworkDiagnosticsMode string
+
+const (
+ // NetworkDiagnosticsNoOpinion means that the user has no opinion and the platform is left
+ // to choose reasonable default. The current default is All and is a subject to change over time.
+ NetworkDiagnosticsNoOpinion NetworkDiagnosticsMode = ""
+ // NetworkDiagnosticsAll means that all network diagnostics checks are enabled
+ NetworkDiagnosticsAll NetworkDiagnosticsMode = "All"
+ // NetworkDiagnosticsDisabled means that network diagnostics is disabled
+ NetworkDiagnosticsDisabled NetworkDiagnosticsMode = "Disabled"
+)
+
+// NetworkDiagnostics defines network diagnostics configuration
+
+type NetworkDiagnostics struct {
+ // mode controls the network diagnostics mode
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // The current default is All.
+ //
+ // +optional
+ Mode NetworkDiagnosticsMode `json:"mode"`
+
+ // sourcePlacement controls the scheduling of network diagnostics source deployment
+ //
+ // See NetworkDiagnosticsSourcePlacement for more details about default values.
+ //
+ // +optional
+ SourcePlacement NetworkDiagnosticsSourcePlacement `json:"sourcePlacement"`
+
+ // targetPlacement controls the scheduling of network diagnostics target daemonset
+ //
+ // See NetworkDiagnosticsTargetPlacement for more details about default values.
+ //
+ // +optional
+ TargetPlacement NetworkDiagnosticsTargetPlacement `json:"targetPlacement"`
+}
+
+// NetworkDiagnosticsSourcePlacement defines node scheduling configuration network diagnostics source components
+type NetworkDiagnosticsSourcePlacement struct {
+ // nodeSelector is the node selector applied to network diagnostics components
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // The current default is `kubernetes.io/os: linux`.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector"`
+
+ // tolerations is a list of tolerations applied to network diagnostics components
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // The current default is an empty list.
+ //
+ // +optional
+ // +listType=atomic
+ Tolerations []corev1.Toleration `json:"tolerations"`
+}
+
+// NetworkDiagnosticsTargetPlacement defines node scheduling configuration network diagnostics target components
+type NetworkDiagnosticsTargetPlacement struct {
+ // nodeSelector is the node selector applied to network diagnostics components
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // The current default is `kubernetes.io/os: linux`.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector"`
+
+ // tolerations is a list of tolerations applied to network diagnostics components
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose reasonable defaults. These defaults are subject to change over time.
+ // The current default is `- operator: "Exists"` which means that all taints are tolerated.
+ //
+ // +optional
+ // +listType=atomic
+ Tolerations []corev1.Toleration `json:"tolerations"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go
new file mode 100644
index 0000000000..3dd31f39ad
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_node.go
@@ -0,0 +1,117 @@
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node holds cluster-wide information about node specific features.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1107
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=nodes,scope=Cluster
+// +kubebuilder:subresource:status
+type Node struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec NodeSpec `json:"spec"`
+
+ // status holds observed values.
+ // +optional
+ Status NodeStatus `json:"status"`
+}
+
+type NodeSpec struct {
+ // CgroupMode determines the cgroups version on the node
+ // +optional
+ CgroupMode CgroupMode `json:"cgroupMode,omitempty"`
+
+ // WorkerLatencyProfile determines how fast the kubelet is updating
+ // the status and corresponding reaction of the cluster
+ // +optional
+ WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
+}
+
+type NodeStatus struct{}
+
+// +kubebuilder:validation:Enum=v1;v2;""
+type CgroupMode string
+
+const (
+ CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift
+ CgroupModeV1 CgroupMode = "v1"
+ CgroupModeV2 CgroupMode = "v2"
+ CgroupModeDefault CgroupMode = CgroupModeV1
+)
+
+// +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction
+type WorkerLatencyProfileType string
+
+const (
+ // Medium Kubelet Update Frequency (heart-beat) and Average Reaction Time to unresponsive Node
+ MediumUpdateAverageReaction WorkerLatencyProfileType = "MediumUpdateAverageReaction"
+
+ // Low Kubelet Update Frequency (heart-beat) and Slow Reaction Time to unresponsive Node
+ LowUpdateSlowReaction WorkerLatencyProfileType = "LowUpdateSlowReaction"
+
+ // Default values of relevant Kubelet, Kube Controller Manager and Kube API Server
+ DefaultUpdateDefaultReaction WorkerLatencyProfileType = "Default"
+)
+
+const (
+ // DefaultNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+ DefaultNodeStatusUpdateFrequency = 10 * time.Second
+ // DefaultNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+ DefaultNodeMonitorGracePeriod = 40 * time.Second
+ // DefaultNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+ DefaultNotReadyTolerationSeconds = 300
+ // DefaultUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of DefaultUpdateDefaultReaction WorkerLatencyProfile type
+ DefaultUnreachableTolerationSeconds = 300
+
+ // MediumNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+ MediumNodeStatusUpdateFrequency = 20 * time.Second
+ // MediumNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+ MediumNodeMonitorGracePeriod = 2 * time.Minute
+ // MediumNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+ MediumNotReadyTolerationSeconds = 60
+ // MediumUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of MediumUpdateAverageReaction WorkerLatencyProfile type
+ MediumUnreachableTolerationSeconds = 60
+
+ // LowNodeStatusUpdateFrequency refers to the "--node-status-update-frequency" of the kubelet in case of LowUpdateSlowReaction WorkerLatencyProfile type
+ LowNodeStatusUpdateFrequency = 1 * time.Minute
+ // LowNodeMonitorGracePeriod refers to the "--node-monitor-grace-period" of the Kube Controller Manager in case of LowUpdateSlowReaction WorkerLatencyProfile type
+ LowNodeMonitorGracePeriod = 5 * time.Minute
+ // LowNotReadyTolerationSeconds refers to the "--default-not-ready-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type
+ LowNotReadyTolerationSeconds = 60
+ // LowUnreachableTolerationSeconds refers to the "--default-unreachable-toleration-seconds" of the Kube API Server in case of LowUpdateSlowReaction WorkerLatencyProfile type
+ LowUnreachableTolerationSeconds = 60
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NodeList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Node `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
new file mode 100644
index 0000000000..6654479dc8
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go
@@ -0,0 +1,597 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// OAuth Server and Identity Provider Config
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`.
+// It is used to configure the integrated OAuth server.
+// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=oauths,scope=Cluster
+// +kubebuilder:subresource:status
+type OAuth struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OAuthSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status OAuthStatus `json:"status"`
+}
+
+// OAuthSpec contains desired cluster auth configuration
+type OAuthSpec struct {
+ // identityProviders is an ordered list of ways for a user to identify themselves.
+ // When this list is empty, no identities are provisioned for users.
+ // +optional
+ // +listType=atomic
+ IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
+
+ // tokenConfig contains options for authorization and access tokens
+ TokenConfig TokenConfig `json:"tokenConfig"`
+
+ // templates allow you to customize pages like the login page.
+ // +optional
+ Templates OAuthTemplates `json:"templates"`
+}
+
+// OAuthStatus shows current known state of OAuth server in the cluster
+type OAuthStatus struct {
+ // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+ // accessTokenMaxAgeSeconds defines the maximum age of access tokens
+ AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+
+ // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+ // +optional
+ AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+
+ // accessTokenInactivityTimeout defines the token inactivity timeout
+ // for tokens granted by any client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out. Takes valid time
+ // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+ // value for duration is 300s (5 minutes). If the timeout is configured
+ // per client, then that value takes precedence. If the timeout value is
+ // not specified and the client does not override the value, then tokens
+ // are valid until their lifetime.
+ //
+ // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
+ // +optional
+ AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+const (
+ // LoginTemplateKey is the key of the login template in a secret
+ LoginTemplateKey = "login.html"
+
+ // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
+ ProviderSelectionTemplateKey = "providers.html"
+
+ // ErrorsTemplateKey is the key for the errors template in a secret
+ ErrorsTemplateKey = "errors.html"
+
+ // BindPasswordKey is the key for the LDAP bind password in a secret
+ BindPasswordKey = "bindPassword"
+
+ // ClientSecretKey is the key for the oauth client secret data in a secret
+ ClientSecretKey = "clientSecret"
+
+ // HTPasswdDataKey is the key for the htpasswd file data in a secret
+ HTPasswdDataKey = "htpasswd"
+)
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+ // login is the name of a secret that specifies a go template to use to render the login page.
+ // The key "login.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default login page is used.
+ // If the specified template is not valid, the default login page is used.
+ // If unspecified, the default login page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ Login SecretNameReference `json:"login"`
+
+ // providerSelection is the name of a secret that specifies a go template to use to render
+ // the provider selection page.
+ // The key "providers.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default provider selection page is used.
+ // If the specified template is not valid, the default provider selection page is used.
+ // If unspecified, the default provider selection page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ ProviderSelection SecretNameReference `json:"providerSelection"`
+
+ // error is the name of a secret that specifies a go template to use to render error pages
+ // during the authentication or grant flow.
+ // The key "errors.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default error page is used.
+ // If the specified template is not valid, the default error page is used.
+ // If unspecified, the default error page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ Error SecretNameReference `json:"error"`
+}
+
+// IdentityProvider provides identities for users authenticating using credentials
+type IdentityProvider struct {
+ // name is used to qualify the identities returned by this provider.
+ // - It MUST be unique and not shared by any other identity provider used
+ // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":"
+ // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName
+ Name string `json:"name"`
+
+ // mappingMethod determines how identities from this provider are mapped to users
+ // Defaults to "claim"
+ // +optional
+ MappingMethod MappingMethodType `json:"mappingMethod,omitempty"`
+
+ IdentityProviderConfig `json:",inline"`
+}
+
+// MappingMethodType specifies how new identities should be mapped to users when they log in
+type MappingMethodType string
+
+const (
+ // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user
+ // with that user name is already mapped to another identity.
+ // Default.
+ MappingMethodClaim MappingMethodType = "claim"
+
+ // MappingMethodLookup looks up existing users already mapped to an identity but does not
+ // automatically provision users or identities. Requires identities and users be set up
+ // manually or using an external process.
+ MappingMethodLookup MappingMethodType = "lookup"
+
+ // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with
+ // that user name already exists, the identity is mapped to the existing user, adding to any
+ // existing identity mappings for the user.
+ MappingMethodAdd MappingMethodType = "add"
+)
+
+type IdentityProviderType string
+
+const (
+ // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
+ IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
+
+ // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
+ IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
+
+ // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
+ IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
+
+ // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
+ IdentityProviderTypeGoogle IdentityProviderType = "Google"
+
+ // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
+ IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
+
+ // IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
+ IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
+
+ // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
+ IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
+
+ // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
+ IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
+
+ // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
+ IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
+)
+
+// IdentityProviderConfig contains configuration for using a specific identity provider
+type IdentityProviderConfig struct {
+ // type identifies the identity provider type for this entry.
+ Type IdentityProviderType `json:"type"`
+
+ // Provider-specific configuration
+ // The json tag MUST match the `Type` specified above, case-insensitively
+ // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided
+
+ // basicAuth contains configuration options for the BasicAuth IdP
+ // +optional
+ BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
+
+ // github enables user authentication using GitHub credentials
+ // +optional
+ GitHub *GitHubIdentityProvider `json:"github,omitempty"`
+
+ // gitlab enables user authentication using GitLab credentials
+ // +optional
+ GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
+
+ // google enables user authentication using Google credentials
+ // +optional
+ Google *GoogleIdentityProvider `json:"google,omitempty"`
+
+ // htpasswd enables user authentication using an HTPasswd file to validate credentials
+ // +optional
+ HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
+
+ // keystone enables user authentication using keystone password credentials
+ // +optional
+ Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
+
+ // ldap enables user authentication using LDAP credentials
+ // +optional
+ LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
+
+ // openID enables user authentication using OpenID credentials
+ // +optional
+ OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
+
+ // requestHeader enables user authentication using request header credentials
+ // +optional
+ RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
+}
+
+// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+type BasicAuthIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
+ OAuthRemoteConnectionInfo `json:",inline"`
+}
+
+// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
+type OAuthRemoteConnectionInfo struct {
+ // url is the remote URL to connect to
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // tlsClientCert is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS client certificate to present when connecting to the server.
+ // The key "tls.crt" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientCert SecretNameReference `json:"tlsClientCert"`
+
+ // tlsClientKey is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
+ // The key "tls.key" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+ // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+ // The key "htpasswd" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // If the specified htpasswd data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+ // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+ // The syntax of the URL is:
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+
+ // bindDN is an optional DN to bind with during the search phase.
+ // +optional
+ BindDN string `json:"bindDN"`
+
+ // bindPassword is an optional reference to a secret by name
+ // containing a password to bind with during the search phase.
+ // The key "bindPassword" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ BindPassword SecretNameReference `json:"bindPassword"`
+
+ // insecure, if true, indicates the connection should not use TLS
+ // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+ // attempt to connect using TLS, even when `insecure` is set to `true`
+ // When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
+ // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+ Insecure bool `json:"insecure"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // id is the list of attributes whose values should be used as the user ID. Required.
+ // First non-empty attribute is used. At least one attribute is required. If none of the listed
+ // attributes has a value, authentication fails.
+ // LDAP standard identity attribute is "dn"
+ ID []string `json:"id"`
+
+ // preferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +optional
+ Email []string `json:"email,omitempty"`
+}
+
+// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
+type KeystoneIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
+ OAuthRemoteConnectionInfo `json:",inline"`
+
+ // domainName is required for keystone v3
+ DomainName string `json:"domainName"`
+
+ // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
+ // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
+ // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
+ // +optional
+ // UseUsernameIdentity bool `json:"useUsernameIdentity"`
+}
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+type RequestHeaderIdentityProvider struct {
+ // loginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when login is set to true.
+ LoginURL string `json:"loginURL"`
+
+ // challengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
+ // redirected here.
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when challenge is set to true.
+ ChallengeURL string `json:"challengeURL"`
+
+ // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // Specifically, it allows verification of incoming requests to prevent header spoofing.
+ // The key "ca.crt" is used to locate the data.
+ // If the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // The namespace for this config map is openshift-config.
+ ClientCA ConfigMapNameReference `json:"ca"`
+
+ // clientCommonNames is an optional list of common names to require a match from. If empty, any
+ // client certificate validated against the clientCA bundle is considered authoritative.
+ // +optional
+ ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+ // headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+
+ // preferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+ // nameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+
+ // emailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // organizations optionally restricts which organizations are allowed to log in
+ // +optional
+ Organizations []string `json:"organizations,omitempty"`
+
+ // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
+ // +optional
+ Teams []string `json:"teams,omitempty"`
+
+ // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+ // GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+ // +optional
+ Hostname string `json:"hostname"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // This can only be configured when hostname is set to a non-empty value.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // url is the oauth server base URL
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GoogleIdentityProvider provides identities for users authenticating using Google credentials
+type GoogleIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
+ // +optional
+ HostedDomain string `json:"hostedDomain"`
+}
+
+// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
+type OpenIDIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // extraScopes are any scopes to request in addition to the standard "openid" scope.
+ // +optional
+ ExtraScopes []string `json:"extraScopes,omitempty"`
+
+ // extraAuthorizeParameters are any custom parameters to add to the authorize request.
+ // +optional
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"`
+
+ // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier.
+ // It must use the https scheme with no query or fragment component.
+ Issuer string `json:"issuer"`
+
+ // claims mappings
+ Claims OpenIDClaims `json:"claims"`
+}
+
+// UserIDClaim is the claim used to provide a stable identifier for OIDC identities.
+// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability
+//
+// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can
+// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique
+// and never reassigned within the Issuer for a particular End-User, as described in Section 2.
+// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the
+// iss Claim and the sub Claim."
+const UserIDClaim = "sub"
+
+// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo
+// responses
+// +kubebuilder:validation:MinLength=1
+type OpenIDClaim string
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+ // preferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the sub claim
+ // +listType=atomic
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of claims whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // +listType=atomic
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of claims whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +listType=atomic
+ // +optional
+ Email []string `json:"email,omitempty"`
+
+ // groups is the list of claims value of which should be used to synchronize groups
+ // from the OIDC provider to OpenShift for the user.
+ // If multiple claims are specified, the first one with a non-empty value is used.
+ // +listType=atomic
+ // +optional
+ Groups []OpenIDClaim `json:"groups,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []OAuth `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
new file mode 100644
index 0000000000..1fddfa51e5
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go
@@ -0,0 +1,96 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorHubSpec defines the desired state of OperatorHub
+type OperatorHubSpec struct {
+ // disableAllDefaultSources allows you to disable all the default hub
+ // sources. If this is true, a specific entry in sources can be used to
+ // enable a default source. If this is false, a specific entry in
+ // sources can be used to disable or enable a default source.
+ // +optional
+ DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"`
+ // sources is the list of default hub sources and their configuration.
+ // If the list is empty, it implies that the default hub sources are
+ // enabled on the cluster unless disableAllDefaultSources is true.
+ // If disableAllDefaultSources is true and sources is not empty,
+ // the configuration present in sources will take precedence. The list of
+ // default hub sources and their current state will always be reflected in
+ // the status block.
+ // +optional
+ Sources []HubSource `json:"sources,omitempty"`
+}
+
+// OperatorHubStatus defines the observed state of OperatorHub. The current
+// state of the default hub sources will always be reflected here.
+type OperatorHubStatus struct {
+ // sources encapsulates the result of applying the configuration for each
+ // hub source
+ Sources []HubSourceStatus `json:"sources,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHub is the Schema for the operatorhubs API. It can be used to change
+// the state of the default hub sources for OperatorHub on the cluster from
+// enabled to disabled and vice versa.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=operatorhubs,scope=Cluster
+// +kubebuilder:subresource:status
+// +genclient
+// +genclient:nonNamespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=marketplace,operatorOrdering=01
+// +openshift:capability=marketplace
+// +openshift:compatibility-gen:level=1
+type OperatorHub struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec OperatorHubSpec `json:"spec"`
+ Status OperatorHubStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHubList contains a list of OperatorHub
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OperatorHubList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []OperatorHub `json:"items"`
+}
+
+// HubSource is used to specify the hub source and its configuration
+type HubSource struct {
+ // name is the name of one of the default hub sources
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:Required
+ Name string `json:"name"`
+ // disabled is used to disable a default hub source on cluster
+ // +kubebuilder:Required
+ Disabled bool `json:"disabled"`
+}
+
+// HubSourceStatus is used to reflect the current state of applying the
+// configuration to a default source
+type HubSourceStatus struct {
+ HubSource `json:",omitempty"`
+ // status indicates success or failure in applying the configuration
+ Status string `json:"status,omitempty"`
+ // message provides more information regarding failures
+ Message string `json:"message,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
new file mode 100644
index 0000000000..8d6d614b67
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -0,0 +1,70 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds cluster-wide information about Project. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=projects,scope=Cluster
+// +kubebuilder:subresource:status
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProjectSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ProjectStatus `json:"status"`
+}
+
+// TemplateReference references a template in a specific namespace.
+// The namespace must be specified at the point of use.
+type TemplateReference struct {
+ // name is the metadata.name of the referenced project request template
+ Name string `json:"name"`
+}
+
+// ProjectSpec holds the project creation configuration.
+type ProjectSpec struct {
+ // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint
+ // +optional
+ ProjectRequestMessage string `json:"projectRequestMessage"`
+
+ // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
+ // This must point to a template in 'openshift-config' namespace. It is optional.
+ // If it is not specified, a default template is used.
+ //
+ // +optional
+ ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"`
+}
+
+type ProjectStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProjectList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Project `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
new file mode 100644
index 0000000000..851291bb05
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -0,0 +1,110 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=proxies,scope=Cluster
+// +kubebuilder:subresource:status
+type Proxy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec holds user-settable values for the proxy configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProxySpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ProxyStatus `json:"status"`
+}
+
+// ProxySpec contains cluster proxy creation configuration.
+type ProxySpec struct {
+ // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
+ // +optional
+ HTTPProxy string `json:"httpProxy,omitempty"`
+
+ // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.
+ // +optional
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+ // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+ // Empty means unset and will not result in an env var.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+
+ // readinessEndpoints is a list of endpoints used to verify readiness of the proxy.
+ // +optional
+ ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"`
+
+ // trustedCA is a reference to a ConfigMap containing a CA certificate bundle.
+ // The trustedCA field should only be consumed by a proxy validator. The
+ // validator is responsible for reading the certificate bundle from the required
+ // key "ca-bundle.crt", merging it with the system default trust bundle,
+ // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle"
+ // in the "openshift-config-managed" namespace. Clients that expect to make
+ // proxy connections must use the trusted-ca-bundle for all HTTPS requests to
+ // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as
+ // well.
+ //
+ // The namespace for the ConfigMap referenced by trustedCA is
+ // "openshift-config". Here is an example ConfigMap (in yaml):
+ //
+ // apiVersion: v1
+ // kind: ConfigMap
+ // metadata:
+ // name: user-ca-bundle
+ // namespace: openshift-config
+ // data:
+ // ca-bundle.crt: |
+ // -----BEGIN CERTIFICATE-----
+ // Custom CA certificate bundle.
+ // -----END CERTIFICATE-----
+ //
+ // +optional
+ TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"`
+}
+
+// ProxyStatus shows current known state of the cluster proxy.
+type ProxyStatus struct {
+ // httpProxy is the URL of the proxy for HTTP requests.
+ // +optional
+ HTTPProxy string `json:"httpProxy,omitempty"`
+
+ // httpsProxy is the URL of the proxy for HTTPS requests.
+ // +optional
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+ // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProxyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Proxy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
new file mode 100644
index 0000000000..061c4a8835
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -0,0 +1,144 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+// and influence its placement decisions. The canonical name for this config is `cluster`.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=schedulers,scope=Cluster
+// +kubebuilder:subresource:status
+type Scheduler struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec SchedulerSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status SchedulerStatus `json:"status"`
+}
+
+type SchedulerSpec struct {
+ // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release.
+ // policy is a reference to a ConfigMap containing scheduler policy which has
+ // user specified predicates and priorities. If this ConfigMap is not available
+ // scheduler will default to use DefaultAlgorithmProvider.
+ // The namespace for this configmap is openshift-config.
+ // +optional
+ Policy ConfigMapNameReference `json:"policy,omitempty"`
+ // profile sets which scheduling profile should be set in order to configure scheduling
+ // decisions for new pods.
+ //
+ // Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring"
+ // Defaults to "LowNodeUtilization"
+ // +optional
+ Profile SchedulerProfile `json:"profile,omitempty"`
+ // profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.
+ // +openshift:enable:FeatureGate=DynamicResourceAllocation
+ // +optional
+ ProfileCustomizations ProfileCustomizations `json:"profileCustomizations"`
+ // defaultNodeSelector helps set the cluster-wide default node selector to
+ // restrict pod placement to specific nodes. This is applied to the pods
+ // created in all namespaces and creates an intersection with any existing
+ // nodeSelectors already set on a pod, additionally constraining that pod's selector.
+ // For example,
+ // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector
+ // field in pod spec to "type=user-node,region=east" to all pods created
+ // in all namespaces. Namespaces having project-wide node selectors won't be
+ // impacted even if this field is set. This adds an annotation section to
+ // the namespace.
+ // For example, if a new namespace is created with
+ // node-selector='type=user-node,region=east',
+ // the annotation openshift.io/node-selector: type=user-node,region=east
+ // gets added to the project. When the openshift.io/node-selector annotation
+ // is set on the project the value is used in preference to the value we are setting
+ // for defaultNodeSelector field.
+ // For instance,
+ // openshift.io/node-selector: "type=user-node,region=west" means
+ // that the default of "type=user-node,region=east" set in defaultNodeSelector
+ // would not be applied.
+ // +optional
+ DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+ // MastersSchedulable allows masters nodes to be schedulable. When this flag is
+ // turned on, all the master nodes in the cluster will be made schedulable,
+ // so that workload pods can run on them. The default value for this field is false,
+ // meaning none of the master nodes are schedulable.
+ // Important Note: Once the workload pods start running on the master nodes,
+ // extreme care must be taken to ensure that cluster-critical control plane components
+ // are not impacted.
+ // Please turn on this field after doing due diligence.
+ // +optional
+ MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+ // LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+ // spread pods evenly among nodes targeting low resource consumption on each node.
+ LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+ // HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+ // as few nodes as possible targeting a small node count but high resource usage on each node.
+ HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+ // NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+ // at the expense of potentially less optimal pod placement decisions.
+ NoScoring SchedulerProfile = "NoScoring"
+)
+
+// ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles
+type ProfileCustomizations struct {
+ // dynamicResourceAllocation allows to enable or disable dynamic resource allocation within the scheduler.
+ // Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod.
+ // Third-party resource drivers are responsible for tracking and allocating resources.
+ // Different kinds of resources support arbitrary parameters for defining requirements and initialization.
+ // Valid values are Enabled, Disabled and omitted.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default,
+ // which is subject to change over time.
+ // The current default is Disabled.
+ // +optional
+ DynamicResourceAllocation DRAEnablement `json:"dynamicResourceAllocation"`
+}
+
+// +kubebuilder:validation:Enum:="";"Enabled";"Disabled"
+type DRAEnablement string
+
+var (
+ // DRAEnablementEnabled enables dynamic resource allocation feature
+ DRAEnablementEnabled DRAEnablement = "Enabled"
+ // DRAEnablementDisabled disables dynamic resource allocation feature
+ DRAEnablementDisabled DRAEnablement = "Disabled"
+)
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type SchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Scheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
new file mode 100644
index 0000000000..4d642e060b
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go
@@ -0,0 +1,46 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into
+// the payload for later analysis on a per-payload basis.
+// This doesn't need any CRD because it's never stored in the cluster.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type TestReporting struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec TestReportingSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status TestReportingStatus `json:"status"`
+}
+
+type TestReportingSpec struct {
+ // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.
+ TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"`
+}
+
+type FeatureGateTests struct {
+ // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.
+ FeatureGate string `json:"featureGate"`
+
+ // Tests contains an item for every TestName
+ Tests []TestDetails `json:"tests"`
+}
+
+type TestDetails struct {
+ // TestName is the name of the test as it appears in junit XMLs.
+ // It does not include the suite name since the same test can be executed in many suites.
+ TestName string `json:"testName"`
+}
+
+type TestReportingStatus struct {
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
new file mode 100644
index 0000000000..c5dea1a032
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
@@ -0,0 +1,311 @@
+package v1
+
+// TLSSecurityProfile defines the schema for a TLS security profile. This object
+// is used by operators to apply TLS security settings to operands.
+// +union
+type TLSSecurityProfile struct {
+ // type is one of Old, Intermediate, Modern or Custom. Custom provides
+ // the ability to specify individual TLS security profile parameters.
+ // Old, Intermediate and Modern are TLS security profiles based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ //
+ // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
+ // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
+ // reduced.
+ //
+ // Note that the Modern profile is currently not supported because it is not
+ // yet well adopted by common software libraries.
+ //
+ // +unionDiscriminator
+ // +optional
+ Type TLSProfileType `json:"type"`
+ // old is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ //
+ // - TLS_AES_128_GCM_SHA256
+ //
+ // - TLS_AES_256_GCM_SHA384
+ //
+ // - TLS_CHACHA20_POLY1305_SHA256
+ //
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ //
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ //
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ //
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ //
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ //
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ //
+ // - DHE-RSA-AES128-GCM-SHA256
+ //
+ // - DHE-RSA-AES256-GCM-SHA384
+ //
+ // - DHE-RSA-CHACHA20-POLY1305
+ //
+ // - ECDHE-ECDSA-AES128-SHA256
+ //
+ // - ECDHE-RSA-AES128-SHA256
+ //
+ // - ECDHE-ECDSA-AES128-SHA
+ //
+ // - ECDHE-RSA-AES128-SHA
+ //
+ // - ECDHE-ECDSA-AES256-SHA384
+ //
+ // - ECDHE-RSA-AES256-SHA384
+ //
+ // - ECDHE-ECDSA-AES256-SHA
+ //
+ // - ECDHE-RSA-AES256-SHA
+ //
+ // - DHE-RSA-AES128-SHA256
+ //
+ // - DHE-RSA-AES256-SHA256
+ //
+ // - AES128-GCM-SHA256
+ //
+ // - AES256-GCM-SHA384
+ //
+ // - AES128-SHA256
+ //
+ // - AES256-SHA256
+ //
+ // - AES128-SHA
+ //
+ // - AES256-SHA
+ //
+ // - DES-CBC3-SHA
+ //
+ // minTLSVersion: VersionTLS10
+ //
+ // +optional
+ // +nullable
+ Old *OldTLSProfile `json:"old,omitempty"`
+ // intermediate is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ //
+ // - TLS_AES_128_GCM_SHA256
+ //
+ // - TLS_AES_256_GCM_SHA384
+ //
+ // - TLS_CHACHA20_POLY1305_SHA256
+ //
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ //
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ //
+ // - ECDHE-ECDSA-AES256-GCM-SHA384
+ //
+ // - ECDHE-RSA-AES256-GCM-SHA384
+ //
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ //
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ //
+ // - DHE-RSA-AES128-GCM-SHA256
+ //
+ // - DHE-RSA-AES256-GCM-SHA384
+ //
+ // minTLSVersion: VersionTLS12
+ //
+ // +optional
+ // +nullable
+ Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"`
+ // modern is a TLS security profile based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ //
+ // and looks like this (yaml):
+ //
+ // ciphers:
+ //
+ // - TLS_AES_128_GCM_SHA256
+ //
+ // - TLS_AES_256_GCM_SHA384
+ //
+ // - TLS_CHACHA20_POLY1305_SHA256
+ //
+ // minTLSVersion: VersionTLS13
+ //
+ // +optional
+ // +nullable
+ Modern *ModernTLSProfile `json:"modern,omitempty"`
+ // custom is a user-defined TLS security profile. Be extremely careful using a custom
+ // profile as invalid configurations can be catastrophic. An example custom profile
+ // looks like this:
+ //
+ // ciphers:
+ //
+ // - ECDHE-ECDSA-CHACHA20-POLY1305
+ //
+ // - ECDHE-RSA-CHACHA20-POLY1305
+ //
+ // - ECDHE-RSA-AES128-GCM-SHA256
+ //
+ // - ECDHE-ECDSA-AES128-GCM-SHA256
+ //
+ // minTLSVersion: VersionTLS11
+ //
+ // +optional
+ // +nullable
+ Custom *CustomTLSProfile `json:"custom,omitempty"`
+}
+
+// OldTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+type OldTLSProfile struct{}
+
+// IntermediateTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
+type IntermediateTLSProfile struct{}
+
+// ModernTLSProfile is a TLS security profile based on:
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+type ModernTLSProfile struct{}
+
+// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful
+// using a custom TLS profile as invalid configurations can be catastrophic.
+type CustomTLSProfile struct {
+ TLSProfileSpec `json:",inline"`
+}
+
+// TLSProfileType defines a TLS security profile type.
+// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom
+type TLSProfileType string
+
+const (
+ // Old is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ TLSProfileOldType TLSProfileType = "Old"
+ // Intermediate is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
+ TLSProfileIntermediateType TLSProfileType = "Intermediate"
+ // Modern is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ TLSProfileModernType TLSProfileType = "Modern"
+ // Custom is a TLS security profile that allows for user-defined parameters.
+ TLSProfileCustomType TLSProfileType = "Custom"
+)
+
+// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
+type TLSProfileSpec struct {
+ // ciphers is used to specify the cipher algorithms that are negotiated
+ // during the TLS handshake. Operators may remove entries their operands
+ // do not support. For example, to use DES-CBC3-SHA (yaml):
+ //
+ // ciphers:
+ // - DES-CBC3-SHA
+ //
+ Ciphers []string `json:"ciphers"`
+ // minTLSVersion is used to specify the minimal version of the TLS protocol
+ // that is negotiated during the TLS handshake. For example, to use TLS
+ // versions 1.1, 1.2 and 1.3 (yaml):
+ //
+ // minTLSVersion: VersionTLS11
+ //
+ // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
+ //
+ MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
+}
+
+// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
+// Protocol versions are based on the following most common TLS configurations:
+//
+// https://ssl-config.mozilla.org/
+//
+// Note that SSLv3.0 is not a supported protocol version due to well known
+// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
+// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13
+type TLSProtocolVersion string
+
+const (
+ // VersionTLS10 is version 1.0 of the TLS security protocol.
+ VersionTLS10 TLSProtocolVersion = "VersionTLS10"
+ // VersionTLS11 is version 1.1 of the TLS security protocol.
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11"
+ // VersionTLS12 is version 1.2 of the TLS security protocol.
+ VersionTLS12 TLSProtocolVersion = "VersionTLS12"
+ // VersionTLS13 is version 1.3 of the TLS security protocol.
+ VersionTLS13 TLSProtocolVersion = "VersionTLS13"
+)
+
+// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
+//
+// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
+// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
+// just be sure to whitelist only and everything will be ok.
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{
+ TLSProfileOldType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ "DHE-RSA-CHACHA20-POLY1305",
+ "ECDHE-ECDSA-AES128-SHA256",
+ "ECDHE-RSA-AES128-SHA256",
+ "ECDHE-ECDSA-AES128-SHA",
+ "ECDHE-RSA-AES128-SHA",
+ "ECDHE-ECDSA-AES256-SHA384",
+ "ECDHE-RSA-AES256-SHA384",
+ "ECDHE-ECDSA-AES256-SHA",
+ "ECDHE-RSA-AES256-SHA",
+ "DHE-RSA-AES128-SHA256",
+ "DHE-RSA-AES256-SHA256",
+ "AES128-GCM-SHA256",
+ "AES256-GCM-SHA384",
+ "AES128-SHA256",
+ "AES256-SHA256",
+ "AES128-SHA",
+ "AES256-SHA",
+ "DES-CBC3-SHA",
+ },
+ MinTLSVersion: VersionTLS10,
+ },
+ TLSProfileIntermediateType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ "ECDHE-ECDSA-AES128-GCM-SHA256",
+ "ECDHE-RSA-AES128-GCM-SHA256",
+ "ECDHE-ECDSA-AES256-GCM-SHA384",
+ "ECDHE-RSA-AES256-GCM-SHA384",
+ "ECDHE-ECDSA-CHACHA20-POLY1305",
+ "ECDHE-RSA-CHACHA20-POLY1305",
+ "DHE-RSA-AES128-GCM-SHA256",
+ "DHE-RSA-AES256-GCM-SHA384",
+ },
+ MinTLSVersion: VersionTLS12,
+ },
+ TLSProfileModernType: {
+ Ciphers: []string{
+ "TLS_AES_128_GCM_SHA256",
+ "TLS_AES_256_GCM_SHA384",
+ "TLS_CHACHA20_POLY1305_SHA256",
+ },
+ MinTLSVersion: VersionTLS13,
+ },
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..9a81bc559c
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -0,0 +1,6034 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServer) DeepCopyInto(out *APIServer) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
+func (in *APIServer) DeepCopy() *APIServer {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServer) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption.
+func (in *APIServerEncryption) DeepCopy() *APIServerEncryption {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerEncryption)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerList) DeepCopyInto(out *APIServerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList.
+func (in *APIServerList) DeepCopy() *APIServerList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) {
+ *out = *in
+ if in.Names != nil {
+ in, out := &in.Names, &out.Names
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.ServingCertificate = in.ServingCertificate
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert.
+func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerNamedServingCert)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) {
+ *out = *in
+ if in.NamedCertificates != nil {
+ in, out := &in.NamedCertificates, &out.NamedCertificates
+ *out = make([]APIServerNamedServingCert, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts.
+func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerServingCerts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
+ *out = *in
+ in.ServingCerts.DeepCopyInto(&out.ServingCerts)
+ out.ClientCA = in.ClientCA
+ if in.AdditionalCORSAllowedOrigins != nil {
+ in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.Encryption = in.Encryption
+ if in.TLSSecurityProfile != nil {
+ in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
+ *out = new(TLSSecurityProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Audit.DeepCopyInto(&out.Audit)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec.
+func (in *APIServerSpec) DeepCopy() *APIServerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus.
+func (in *APIServerStatus) DeepCopy() *APIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSDNSSpec) DeepCopyInto(out *AWSDNSSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSSpec.
+func (in *AWSDNSSpec) DeepCopy() *AWSDNSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSDNSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSIngressSpec) DeepCopyInto(out *AWSIngressSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSIngressSpec.
+func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSIngressSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) {
+ *out = *in
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]AWSServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec.
+func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
+ *out = *in
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]AWSServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AWSResourceTag, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag.
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint.
+func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSServiceEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
+ *out = *in
+ if in.PluginConfig != nil {
+ in, out := &in.PluginConfig, &out.PluginConfig
+ *out = make(map[string]AdmissionPluginConfig, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.EnabledAdmissionPlugins != nil {
+ in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DisabledAdmissionPlugins != nil {
+ in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig.
+func (in *AdmissionConfig) DeepCopy() *AdmissionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) {
+ *out = *in
+ in.Configuration.DeepCopyInto(&out.Configuration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig.
+func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPluginConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec.
+func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) {
+ *out = *in
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AlibabaCloudResourceTag, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus.
+func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag.
+func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudResourceTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Audit) DeepCopyInto(out *Audit) {
+ *out = *in
+ if in.CustomRules != nil {
+ in, out := &in.CustomRules, &out.CustomRules
+ *out = make([]AuditCustomRule, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit.
+func (in *Audit) DeepCopy() *Audit {
+ if in == nil {
+ return nil
+ }
+ out := new(Audit)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule.
+func (in *AuditCustomRule) DeepCopy() *AuditCustomRule {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditCustomRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Authentication) DeepCopyInto(out *Authentication) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
+func (in *Authentication) DeepCopy() *Authentication {
+ if in == nil {
+ return nil
+ }
+ out := new(Authentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Authentication) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Authentication, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList.
+func (in *AuthenticationList) DeepCopy() *AuthenticationList {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
+ *out = *in
+ out.OAuthMetadata = in.OAuthMetadata
+ if in.WebhookTokenAuthenticators != nil {
+ in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
+ *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in))
+ copy(*out, *in)
+ }
+ if in.WebhookTokenAuthenticator != nil {
+ in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator
+ *out = new(WebhookTokenAuthenticator)
+ **out = **in
+ }
+ if in.OIDCProviders != nil {
+ in, out := &in.OIDCProviders, &out.OIDCProviders
+ *out = make([]OIDCProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
+func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) {
+ *out = *in
+ out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata
+ if in.OIDCClients != nil {
+ in, out := &in.OIDCClients, &out.OIDCClients
+ *out = make([]OIDCClientStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus.
+func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec.
+func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AzurePlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) {
+ *out = *in
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]AzureResourceTag, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus.
+func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AzurePlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureResourceTag) DeepCopyInto(out *AzureResourceTag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceTag.
+func (in *AzureResourceTag) DeepCopy() *AzureResourceTag {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureResourceTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BareMetalPlatformLoadBalancer) DeepCopyInto(out *BareMetalPlatformLoadBalancer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformLoadBalancer.
+func (in *BareMetalPlatformLoadBalancer) DeepCopy() *BareMetalPlatformLoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(BareMetalPlatformLoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec.
+func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BareMetalPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(BareMetalPlatformLoadBalancer)
+ **out = **in
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus.
+func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BareMetalPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
+ *out = *in
+ out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider.
+func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicAuthIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+ if in == nil {
+ return nil
+ }
+ out := new(Build)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) {
+ *out = *in
+ if in.DefaultProxy != nil {
+ in, out := &in.DefaultProxy, &out.DefaultProxy
+ *out = new(ProxySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitProxy != nil {
+ in, out := &in.GitProxy, &out.GitProxy
+ *out = new(ProxySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults.
+func (in *BuildDefaults) DeepCopy() *BuildDefaults {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildDefaults)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildList) DeepCopyInto(out *BuildList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Build, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
+func (in *BuildList) DeepCopy() *BuildList {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) {
+ *out = *in
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ForcePull != nil {
+ in, out := &in.ForcePull, &out.ForcePull
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides.
+func (in *BuildOverrides) DeepCopy() *BuildOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
+ *out = *in
+ out.AdditionalTrustedCA = in.AdditionalTrustedCA
+ in.BuildDefaults.DeepCopyInto(&out.BuildDefaults)
+ in.BuildOverrides.DeepCopyInto(&out.BuildOverrides)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
+func (in *BuildSpec) DeepCopy() *BuildSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertInfo) DeepCopyInto(out *CertInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
+func (in *CertInfo) DeepCopy() *CertInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CertInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
+func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudControllerManagerStatus) DeepCopyInto(out *CloudControllerManagerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerStatus.
+func (in *CloudControllerManagerStatus) DeepCopy() *CloudControllerManagerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudControllerManagerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudLoadBalancerConfig) DeepCopyInto(out *CloudLoadBalancerConfig) {
+ *out = *in
+ if in.ClusterHosted != nil {
+ in, out := &in.ClusterHosted, &out.ClusterHosted
+ *out = new(CloudLoadBalancerIPs)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerConfig.
+func (in *CloudLoadBalancerConfig) DeepCopy() *CloudLoadBalancerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudLoadBalancerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudLoadBalancerIPs) DeepCopyInto(out *CloudLoadBalancerIPs) {
+ *out = *in
+ if in.APIIntLoadBalancerIPs != nil {
+ in, out := &in.APIIntLoadBalancerIPs, &out.APIIntLoadBalancerIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.APILoadBalancerIPs != nil {
+ in, out := &in.APILoadBalancerIPs, &out.APILoadBalancerIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressLoadBalancerIPs != nil {
+ in, out := &in.IngressLoadBalancerIPs, &out.IngressLoadBalancerIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudLoadBalancerIPs.
+func (in *CloudLoadBalancerIPs) DeepCopy() *CloudLoadBalancerIPs {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudLoadBalancerIPs)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) {
+ *out = *in
+ if in.PromQL != nil {
+ in, out := &in.PromQL, &out.PromQL
+ *out = new(PromQLClusterCondition)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition.
+func (in *ClusterCondition) DeepCopy() *ClusterCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator.
+func (in *ClusterOperator) DeepCopy() *ClusterOperator {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperator) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterOperator, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList.
+func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterOperatorList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec.
+func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ClusterOperatorStatusCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Versions != nil {
+ in, out := &in.Versions, &out.Versions
+ *out = make([]OperandVersion, len(*in))
+ copy(*out, *in)
+ }
+ if in.RelatedObjects != nil {
+ in, out := &in.RelatedObjects, &out.RelatedObjects
+ *out = make([]ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ in.Extension.DeepCopyInto(&out.Extension)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus.
+func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition.
+func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterOperatorStatusCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion.
+func (in *ClusterVersion) DeepCopy() *ClusterVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersion) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionCapabilitiesSpec) DeepCopyInto(out *ClusterVersionCapabilitiesSpec) {
+ *out = *in
+ if in.AdditionalEnabledCapabilities != nil {
+ in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities
+ *out = make([]ClusterVersionCapability, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesSpec.
+func (in *ClusterVersionCapabilitiesSpec) DeepCopy() *ClusterVersionCapabilitiesSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionCapabilitiesSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionCapabilitiesStatus) DeepCopyInto(out *ClusterVersionCapabilitiesStatus) {
+ *out = *in
+ if in.EnabledCapabilities != nil {
+ in, out := &in.EnabledCapabilities, &out.EnabledCapabilities
+ *out = make([]ClusterVersionCapability, len(*in))
+ copy(*out, *in)
+ }
+ if in.KnownCapabilities != nil {
+ in, out := &in.KnownCapabilities, &out.KnownCapabilities
+ *out = make([]ClusterVersionCapability, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionCapabilitiesStatus.
+func (in *ClusterVersionCapabilitiesStatus) DeepCopy() *ClusterVersionCapabilitiesStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionCapabilitiesStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterVersion, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList.
+func (in *ClusterVersionList) DeepCopy() *ClusterVersionList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterVersionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) {
+ *out = *in
+ if in.DesiredUpdate != nil {
+ in, out := &in.DesiredUpdate, &out.DesiredUpdate
+ *out = new(Update)
+ **out = **in
+ }
+ if in.Capabilities != nil {
+ in, out := &in.Capabilities, &out.Capabilities
+ *out = new(ClusterVersionCapabilitiesSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SignatureStores != nil {
+ in, out := &in.SignatureStores, &out.SignatureStores
+ *out = make([]SignatureStore, len(*in))
+ copy(*out, *in)
+ }
+ if in.Overrides != nil {
+ in, out := &in.Overrides, &out.Overrides
+ *out = make([]ComponentOverride, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec.
+func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) {
+ *out = *in
+ in.Desired.DeepCopyInto(&out.Desired)
+ if in.History != nil {
+ in, out := &in.History, &out.History
+ *out = make([]UpdateHistory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Capabilities.DeepCopyInto(&out.Capabilities)
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ClusterOperatorStatusCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AvailableUpdates != nil {
+ in, out := &in.AvailableUpdates, &out.AvailableUpdates
+ *out = make([]Release, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ConditionalUpdates != nil {
+ in, out := &in.ConditionalUpdates, &out.ConditionalUpdates
+ *out = make([]ConditionalUpdate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus.
+func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterVersionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride.
+func (in *ComponentOverride) DeepCopy() *ComponentOverride {
+ if in == nil {
+ return nil
+ }
+ out := new(ComponentOverride)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) {
+ *out = *in
+ out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec.
+func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ComponentRouteSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) {
+ *out = *in
+ if in.ConsumingUsers != nil {
+ in, out := &in.ConsumingUsers, &out.ConsumingUsers
+ *out = make([]ConsumingUser, len(*in))
+ copy(*out, *in)
+ }
+ if in.CurrentHostnames != nil {
+ in, out := &in.CurrentHostnames, &out.CurrentHostnames
+ *out = make([]Hostname, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.RelatedObjects != nil {
+ in, out := &in.RelatedObjects, &out.RelatedObjects
+ *out = make([]ObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus.
+func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ComponentRouteStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) {
+ *out = *in
+ in.Release.DeepCopyInto(&out.Release)
+ if in.Risks != nil {
+ in, out := &in.Risks, &out.Risks
+ *out = make([]ConditionalUpdateRisk, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate.
+func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(ConditionalUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) {
+ *out = *in
+ if in.MatchingRules != nil {
+ in, out := &in.MatchingRules, &out.MatchingRules
+ *out = make([]ClusterCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk.
+func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk {
+ if in == nil {
+ return nil
+ }
+ out := new(ConditionalUpdateRisk)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference.
+func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapFileReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference.
+func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigMapNameReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Console) DeepCopyInto(out *Console) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
+func (in *Console) DeepCopy() *Console {
+ if in == nil {
+ return nil
+ }
+ out := new(Console)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Console) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication.
+func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Console, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
+func (in *ConsoleList) DeepCopy() *ConsoleList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConsoleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
+ *out = *in
+ out.Authentication = in.Authentication
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
+func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
+func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = make([]FeatureGateName, len(*in))
+ copy(*out, *in)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]FeatureGateName, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates.
+func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomFeatureGates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) {
+ *out = *in
+ in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile.
+func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNS) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSList) DeepCopyInto(out *DNSList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNS, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
+func (in *DNSList) DeepCopy() *DNSList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSPlatformSpec) DeepCopyInto(out *DNSPlatformSpec) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSDNSSpec)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPlatformSpec.
+func (in *DNSPlatformSpec) DeepCopy() *DNSPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+ *out = *in
+ if in.PublicZone != nil {
+ in, out := &in.PublicZone, &out.PublicZone
+ *out = new(DNSZone)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PrivateZone != nil {
+ in, out := &in.PrivateZone, &out.PrivateZone
+ *out = new(DNSZone)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Platform.DeepCopyInto(&out.Platform)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
+func (in *DNSStatus) DeepCopy() *DNSStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSZone) DeepCopyInto(out *DNSZone) {
+ *out = *in
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone.
+func (in *DNSZone) DeepCopy() *DNSZone {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSZone)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
+func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
+func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthorization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) {
+ *out = *in
+ out.KubeConfig = in.KubeConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator.
+func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator {
+ if in == nil {
+ return nil
+ }
+ out := new(DeprecatedWebhookTokenAuthenticator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec.
+func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EquinixMetalPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus.
+func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EquinixMetalPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
+ *out = *in
+ if in.URLs != nil {
+ in, out := &in.URLs, &out.URLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
+func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
+ *out = *in
+ in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
+func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdStorageConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
+ *out = *in
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(ExternalIPPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AutoAssignCIDRs != nil {
+ in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
+func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalIPConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
+ *out = *in
+ if in.AllowedCIDRs != nil {
+ in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.RejectedCIDRs != nil {
+ in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
+func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalIPPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalPlatformSpec) DeepCopyInto(out *ExternalPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformSpec.
+func (in *ExternalPlatformSpec) DeepCopy() *ExternalPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalPlatformStatus) DeepCopyInto(out *ExternalPlatformStatus) {
+ *out = *in
+ out.CloudControllerManager = in.CloudControllerManager
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalPlatformStatus.
+func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate.
+func (in *FeatureGate) DeepCopy() *FeatureGate {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGate) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateAttributes) DeepCopyInto(out *FeatureGateAttributes) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateAttributes.
+func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateAttributes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = make([]FeatureGateAttributes, len(*in))
+ copy(*out, *in)
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]FeatureGateAttributes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDetails.
+func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateDetails)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]FeatureGate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList.
+func (in *FeatureGateList) DeepCopy() *FeatureGateList {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *FeatureGateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) {
+ *out = *in
+ if in.CustomNoUpgrade != nil {
+ in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade
+ *out = new(CustomFeatureGates)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection.
+func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateSelection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) {
+ *out = *in
+ in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec.
+func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make([]FeatureGateDetails, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus.
+func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeatureGateTests) DeepCopyInto(out *FeatureGateTests) {
+ *out = *in
+ if in.Tests != nil {
+ in, out := &in.Tests, &out.Tests
+ *out = make([]TestDetails, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateTests.
+func (in *FeatureGateTests) DeepCopy() *FeatureGateTests {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureGateTests)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec.
+func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
+ *out = *in
+ if in.ResourceLabels != nil {
+ in, out := &in.ResourceLabels, &out.ResourceLabels
+ *out = make([]GCPResourceLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.ResourceTags != nil {
+ in, out := &in.ResourceTags, &out.ResourceTags
+ *out = make([]GCPResourceTag, len(*in))
+ copy(*out, *in)
+ }
+ if in.CloudLoadBalancerConfig != nil {
+ in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig
+ *out = new(CloudLoadBalancerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
+func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPResourceLabel) DeepCopyInto(out *GCPResourceLabel) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceLabel.
+func (in *GCPResourceLabel) DeepCopy() *GCPResourceLabel {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPResourceLabel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPResourceTag) DeepCopyInto(out *GCPResourceTag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceTag.
+func (in *GCPResourceTag) DeepCopy() *GCPResourceTag {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPResourceTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ if in.CORSAllowedOrigins != nil {
+ in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.AuditConfig.DeepCopyInto(&out.AuditConfig)
+ in.StorageConfig.DeepCopyInto(&out.StorageConfig)
+ in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
+ out.KubeClientConfig = in.KubeClientConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig.
+func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ out.LeaderElection = in.LeaderElection
+ out.Authentication = in.Authentication
+ out.Authorization = in.Authorization
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig.
+func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ if in.Organizations != nil {
+ in, out := &in.Organizations, &out.Organizations
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Teams != nil {
+ in, out := &in.Teams, &out.Teams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CA = in.CA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
+func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitHubIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ out.CA = in.CA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitLabIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GoogleIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) {
+ *out = *in
+ out.FileData = in.FileData
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider.
+func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(HTPasswdIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
+func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSource) DeepCopyInto(out *HubSource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource.
+func (in *HubSource) DeepCopy() *HubSource {
+ if in == nil {
+ return nil
+ }
+ out := new(HubSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) {
+ *out = *in
+ out.HubSource = in.HubSource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus.
+func (in *HubSourceStatus) DeepCopy() *HubSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HubSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec.
+func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) {
+ *out = *in
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]IBMCloudServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus.
+func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudServiceEndpoint) DeepCopyInto(out *IBMCloudServiceEndpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudServiceEndpoint.
+func (in *IBMCloudServiceEndpoint) DeepCopy() *IBMCloudServiceEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudServiceEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+ *out = *in
+ in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) {
+ *out = *in
+ if in.BasicAuth != nil {
+ in, out := &in.BasicAuth, &out.BasicAuth
+ *out = new(BasicAuthIdentityProvider)
+ **out = **in
+ }
+ if in.GitHub != nil {
+ in, out := &in.GitHub, &out.GitHub
+ *out = new(GitHubIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GitLab != nil {
+ in, out := &in.GitLab, &out.GitLab
+ *out = new(GitLabIdentityProvider)
+ **out = **in
+ }
+ if in.Google != nil {
+ in, out := &in.Google, &out.Google
+ *out = new(GoogleIdentityProvider)
+ **out = **in
+ }
+ if in.HTPasswd != nil {
+ in, out := &in.HTPasswd, &out.HTPasswd
+ *out = new(HTPasswdIdentityProvider)
+ **out = **in
+ }
+ if in.Keystone != nil {
+ in, out := &in.Keystone, &out.Keystone
+ *out = new(KeystoneIdentityProvider)
+ **out = **in
+ }
+ if in.LDAP != nil {
+ in, out := &in.LDAP, &out.LDAP
+ *out = new(LDAPIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OpenID != nil {
+ in, out := &in.OpenID, &out.OpenID
+ *out = new(OpenIDIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RequestHeader != nil {
+ in, out := &in.RequestHeader, &out.RequestHeader
+ *out = new(RequestHeaderIdentityProvider)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig.
+func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+ if in == nil {
+ return nil
+ }
+ out := new(Image)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Image) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy.
+func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageContentPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList.
+func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) {
+ *out = *in
+ if in.RepositoryDigestMirrors != nil {
+ in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors
+ *out = make([]RepositoryDigestMirrors, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec.
+func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSet) DeepCopyInto(out *ImageDigestMirrorSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSet.
+func (in *ImageDigestMirrorSet) DeepCopy() *ImageDigestMirrorSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageDigestMirrorSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageDigestMirrorSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSetList) DeepCopyInto(out *ImageDigestMirrorSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageDigestMirrorSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetList.
+func (in *ImageDigestMirrorSetList) DeepCopy() *ImageDigestMirrorSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageDigestMirrorSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageDigestMirrorSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSetSpec) DeepCopyInto(out *ImageDigestMirrorSetSpec) {
+ *out = *in
+ if in.ImageDigestMirrors != nil {
+ in, out := &in.ImageDigestMirrors, &out.ImageDigestMirrors
+ *out = make([]ImageDigestMirrors, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetSpec.
+func (in *ImageDigestMirrorSetSpec) DeepCopy() *ImageDigestMirrorSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageDigestMirrorSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrorSetStatus) DeepCopyInto(out *ImageDigestMirrorSetStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrorSetStatus.
+func (in *ImageDigestMirrorSetStatus) DeepCopy() *ImageDigestMirrorSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageDigestMirrorSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageDigestMirrors) DeepCopyInto(out *ImageDigestMirrors) {
+ *out = *in
+ if in.Mirrors != nil {
+ in, out := &in.Mirrors, &out.Mirrors
+ *out = make([]ImageMirror, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestMirrors.
+func (in *ImageDigestMirrors) DeepCopy() *ImageDigestMirrors {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageDigestMirrors)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLabel) DeepCopyInto(out *ImageLabel) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel.
+func (in *ImageLabel) DeepCopy() *ImageLabel {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLabel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageList) DeepCopyInto(out *ImageList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Image, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
+func (in *ImageList) DeepCopy() *ImageList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSpec) DeepCopyInto(out *ImageSpec) {
+ *out = *in
+ if in.AllowedRegistriesForImport != nil {
+ in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
+ *out = make([]RegistryLocation, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.AdditionalTrustedCA = in.AdditionalTrustedCA
+ in.RegistrySources.DeepCopyInto(&out.RegistrySources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec.
+func (in *ImageSpec) DeepCopy() *ImageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStatus) DeepCopyInto(out *ImageStatus) {
+ *out = *in
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus.
+func (in *ImageStatus) DeepCopy() *ImageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagMirrorSet) DeepCopyInto(out *ImageTagMirrorSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSet.
+func (in *ImageTagMirrorSet) DeepCopy() *ImageTagMirrorSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagMirrorSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageTagMirrorSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagMirrorSetList) DeepCopyInto(out *ImageTagMirrorSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageTagMirrorSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetList.
+func (in *ImageTagMirrorSetList) DeepCopy() *ImageTagMirrorSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagMirrorSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageTagMirrorSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagMirrorSetSpec) DeepCopyInto(out *ImageTagMirrorSetSpec) {
+ *out = *in
+ if in.ImageTagMirrors != nil {
+ in, out := &in.ImageTagMirrors, &out.ImageTagMirrors
+ *out = make([]ImageTagMirrors, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetSpec.
+func (in *ImageTagMirrorSetSpec) DeepCopy() *ImageTagMirrorSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagMirrorSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagMirrorSetStatus) DeepCopyInto(out *ImageTagMirrorSetStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrorSetStatus.
+func (in *ImageTagMirrorSetStatus) DeepCopy() *ImageTagMirrorSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagMirrorSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagMirrors) DeepCopyInto(out *ImageTagMirrors) {
+ *out = *in
+ if in.Mirrors != nil {
+ in, out := &in.Mirrors, &out.Mirrors
+ *out = make([]ImageMirror, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagMirrors.
+func (in *ImageTagMirrors) DeepCopy() *ImageTagMirrors {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagMirrors)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure.
+func (in *Infrastructure) DeepCopy() *Infrastructure {
+ if in == nil {
+ return nil
+ }
+ out := new(Infrastructure)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Infrastructure) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Infrastructure, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList.
+func (in *InfrastructureList) DeepCopy() *InfrastructureList {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InfrastructureList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
+ *out = *in
+ out.CloudConfig = in.CloudConfig
+ in.PlatformSpec.DeepCopyInto(&out.PlatformSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
+func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
+ *out = *in
+ if in.PlatformStatus != nil {
+ in, out := &in.PlatformStatus, &out.PlatformStatus
+ *out = new(PlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus.
+func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InfrastructureStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Ingress) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressList) DeepCopyInto(out *IngressList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Ingress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
+func (in *IngressList) DeepCopy() *IngressList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressPlatformSpec) DeepCopyInto(out *IngressPlatformSpec) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSIngressSpec)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPlatformSpec.
+func (in *IngressPlatformSpec) DeepCopy() *IngressPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
+ *out = *in
+ if in.ComponentRoutes != nil {
+ in, out := &in.ComponentRoutes, &out.ComponentRoutes
+ *out = make([]ComponentRouteSpec, len(*in))
+ copy(*out, *in)
+ }
+ if in.RequiredHSTSPolicies != nil {
+ in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies
+ *out = make([]RequiredHSTSPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
+func (in *IngressSpec) DeepCopy() *IngressSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
+ *out = *in
+ if in.ComponentRoutes != nil {
+ in, out := &in.ComponentRoutes, &out.ComponentRoutes
+ *out = make([]ComponentRouteStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
+func (in *IngressStatus) DeepCopy() *IngressStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile.
+func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(IntermediateTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) {
+ *out = *in
+ out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider.
+func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(KeystoneIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) {
+ *out = *in
+ out.ConnectionOverrides = in.ConnectionOverrides
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig.
+func (in *KubeClientConfig) DeepCopy() *KubeClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec.
+func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus.
+func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubevirtPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
+func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPAttributeMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) {
+ *out = *in
+ out.BindPassword = in.BindPassword
+ out.CA = in.CA
+ in.Attributes.DeepCopyInto(&out.Attributes)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider.
+func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LeaderElection) DeepCopyInto(out *LeaderElection) {
+ *out = *in
+ out.LeaseDuration = in.LeaseDuration
+ out.RenewDeadline = in.RenewDeadline
+ out.RetryPeriod = in.RetryPeriod
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection.
+func (in *LeaderElection) DeepCopy() *LeaderElection {
+ if in == nil {
+ return nil
+ }
+ out := new(LeaderElection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) {
+ *out = *in
+ in.Platform.DeepCopyInto(&out.Platform)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer.
+func (in *LoadBalancer) DeepCopy() *LoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MTUMigration) DeepCopyInto(out *MTUMigration) {
+ *out = *in
+ if in.Network != nil {
+ in, out := &in.Network, &out.Network
+ *out = new(MTUMigrationValues)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Machine != nil {
+ in, out := &in.Machine, &out.Machine
+ *out = new(MTUMigrationValues)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration.
+func (in *MTUMigration) DeepCopy() *MTUMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(MTUMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) {
+ *out = *in
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues.
+func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues {
+ if in == nil {
+ return nil
+ }
+ out := new(MTUMigrationValues)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) {
+ *out = *in
+ if in.LargestMaxAge != nil {
+ in, out := &in.LargestMaxAge, &out.LargestMaxAge
+ *out = new(int32)
+ **out = **in
+ }
+ if in.SmallestMaxAge != nil {
+ in, out := &in.SmallestMaxAge, &out.SmallestMaxAge
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy.
+func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(MaxAgePolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile.
+func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(ModernTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
+ *out = *in
+ if in.Names != nil {
+ in, out := &in.Names, &out.Names
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
+func (in *NamedCertificate) DeepCopy() *NamedCertificate {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedCertificate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+ if in == nil {
+ return nil
+ }
+ out := new(Network)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Network) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDiagnostics) DeepCopyInto(out *NetworkDiagnostics) {
+ *out = *in
+ in.SourcePlacement.DeepCopyInto(&out.SourcePlacement)
+ in.TargetPlacement.DeepCopyInto(&out.TargetPlacement)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnostics.
+func (in *NetworkDiagnostics) DeepCopy() *NetworkDiagnostics {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkDiagnostics)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDiagnosticsSourcePlacement) DeepCopyInto(out *NetworkDiagnosticsSourcePlacement) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsSourcePlacement.
+func (in *NetworkDiagnosticsSourcePlacement) DeepCopy() *NetworkDiagnosticsSourcePlacement {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkDiagnosticsSourcePlacement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDiagnosticsTargetPlacement) DeepCopyInto(out *NetworkDiagnosticsTargetPlacement) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDiagnosticsTargetPlacement.
+func (in *NetworkDiagnosticsTargetPlacement) DeepCopy() *NetworkDiagnosticsTargetPlacement {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkDiagnosticsTargetPlacement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkList) DeepCopyInto(out *NetworkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Network, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
+func (in *NetworkList) DeepCopy() *NetworkList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) {
+ *out = *in
+ if in.MTU != nil {
+ in, out := &in.MTU, &out.MTU
+ *out = new(MTUMigration)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration.
+func (in *NetworkMigration) DeepCopy() *NetworkMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalIP != nil {
+ in, out := &in.ExternalIP, &out.ExternalIP
+ *out = new(ExternalIPConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ in.NetworkDiagnostics.DeepCopyInto(&out.NetworkDiagnostics)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
+ *out = *in
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Migration != nil {
+ in, out := &in.Migration, &out.Migration
+ *out = new(NetworkMigration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
+func (in *NetworkStatus) DeepCopy() *NetworkStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+ if in == nil {
+ return nil
+ }
+ out := new(Node)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Node) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Node, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixFailureDomain) DeepCopyInto(out *NutanixFailureDomain) {
+ *out = *in
+ in.Cluster.DeepCopyInto(&out.Cluster)
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make([]NutanixResourceIdentifier, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomain.
+func (in *NutanixFailureDomain) DeepCopy() *NutanixFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformLoadBalancer.
+func (in *NutanixPlatformLoadBalancer) DeepCopy() *NutanixPlatformLoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixPlatformLoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) {
+ *out = *in
+ out.PrismCentral = in.PrismCentral
+ if in.PrismElements != nil {
+ in, out := &in.PrismElements, &out.PrismElements
+ *out = make([]NutanixPrismElementEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ if in.FailureDomains != nil {
+ in, out := &in.FailureDomains, &out.FailureDomains
+ *out = make([]NutanixFailureDomain, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformSpec.
+func (in *NutanixPlatformSpec) DeepCopy() *NutanixPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(NutanixPlatformLoadBalancer)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformStatus.
+func (in *NutanixPlatformStatus) DeepCopy() *NutanixPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixPrismElementEndpoint) DeepCopyInto(out *NutanixPrismElementEndpoint) {
+ *out = *in
+ out.Endpoint = in.Endpoint
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismElementEndpoint.
+func (in *NutanixPrismElementEndpoint) DeepCopy() *NutanixPrismElementEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixPrismElementEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixPrismEndpoint) DeepCopyInto(out *NutanixPrismEndpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPrismEndpoint.
+func (in *NutanixPrismEndpoint) DeepCopy() *NutanixPrismEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixPrismEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) {
+ *out = *in
+ if in.UUID != nil {
+ in, out := &in.UUID, &out.UUID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier.
+func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixResourceIdentifier)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuth) DeepCopyInto(out *OAuth) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth.
+func (in *OAuth) DeepCopy() *OAuth {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuth)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuth) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthList) DeepCopyInto(out *OAuthList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuth, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList.
+func (in *OAuthList) DeepCopy() *OAuthList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) {
+ *out = *in
+ out.CA = in.CA
+ out.TLSClientCert = in.TLSClientCert
+ out.TLSClientKey = in.TLSClientKey
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo.
+func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthRemoteConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) {
+ *out = *in
+ if in.IdentityProviders != nil {
+ in, out := &in.IdentityProviders, &out.IdentityProviders
+ *out = make([]IdentityProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.TokenConfig.DeepCopyInto(&out.TokenConfig)
+ out.Templates = in.Templates
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec.
+func (in *OAuthSpec) DeepCopy() *OAuthSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus.
+func (in *OAuthStatus) DeepCopy() *OAuthStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
+ *out = *in
+ out.Login = in.Login
+ out.ProviderSelection = in.ProviderSelection
+ out.Error = in.Error
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
+func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthTemplates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCClientConfig) DeepCopyInto(out *OIDCClientConfig) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientConfig.
+func (in *OIDCClientConfig) DeepCopy() *OIDCClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCClientReference) DeepCopyInto(out *OIDCClientReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientReference.
+func (in *OIDCClientReference) DeepCopy() *OIDCClientReference {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCClientReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCClientStatus) DeepCopyInto(out *OIDCClientStatus) {
+ *out = *in
+ if in.CurrentOIDCClients != nil {
+ in, out := &in.CurrentOIDCClients, &out.CurrentOIDCClients
+ *out = make([]OIDCClientReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.ConsumingUsers != nil {
+ in, out := &in.ConsumingUsers, &out.ConsumingUsers
+ *out = make([]ConsumingUser, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCClientStatus.
+func (in *OIDCClientStatus) DeepCopy() *OIDCClientStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCClientStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCProvider) DeepCopyInto(out *OIDCProvider) {
+ *out = *in
+ in.Issuer.DeepCopyInto(&out.Issuer)
+ if in.OIDCClients != nil {
+ in, out := &in.OIDCClients, &out.OIDCClients
+ *out = make([]OIDCClientConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+ if in.ClaimValidationRules != nil {
+ in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+ *out = make([]TokenClaimValidationRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCProvider.
+func (in *OIDCProvider) DeepCopy() *OIDCProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile.
+func (in *OldTLSProfile) DeepCopy() *OldTLSProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(OldTLSProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
+ *out = *in
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]OpenIDClaim, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
+func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDClaims)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
+ *out = *in
+ out.ClientSecret = in.ClientSecret
+ out.CA = in.CA
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraAuthorizeParameters != nil {
+ in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Claims.DeepCopyInto(&out.Claims)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackPlatformLoadBalancer) DeepCopyInto(out *OpenStackPlatformLoadBalancer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformLoadBalancer.
+func (in *OpenStackPlatformLoadBalancer) DeepCopy() *OpenStackPlatformLoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackPlatformLoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec.
+func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(OpenStackPlatformLoadBalancer)
+ **out = **in
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus.
+func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperandVersion) DeepCopyInto(out *OperandVersion) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion.
+func (in *OperandVersion) DeepCopy() *OperandVersion {
+ if in == nil {
+ return nil
+ }
+ out := new(OperandVersion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHub) DeepCopyInto(out *OperatorHub) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub.
+func (in *OperatorHub) DeepCopy() *OperatorHub {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHub)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorHub) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OperatorHub, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList.
+func (in *OperatorHubList) DeepCopy() *OperatorHubList {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OperatorHubList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) {
+ *out = *in
+ if in.Sources != nil {
+ in, out := &in.Sources, &out.Sources
+ *out = make([]HubSource, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec.
+func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) {
+ *out = *in
+ if in.Sources != nil {
+ in, out := &in.Sources, &out.Sources
+ *out = make([]HubSourceStatus, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus.
+func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorHubStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OvirtPlatformLoadBalancer) DeepCopyInto(out *OvirtPlatformLoadBalancer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformLoadBalancer.
+func (in *OvirtPlatformLoadBalancer) DeepCopy() *OvirtPlatformLoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(OvirtPlatformLoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec.
+func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OvirtPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(OvirtPlatformLoadBalancer)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus.
+func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OvirtPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzurePlatformSpec)
+ **out = **in
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new(GCPPlatformSpec)
+ **out = **in
+ }
+ if in.BareMetal != nil {
+ in, out := &in.BareMetal, &out.BareMetal
+ *out = new(BareMetalPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OpenStack != nil {
+ in, out := &in.OpenStack, &out.OpenStack
+ *out = new(OpenStackPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ovirt != nil {
+ in, out := &in.Ovirt, &out.Ovirt
+ *out = new(OvirtPlatformSpec)
+ **out = **in
+ }
+ if in.VSphere != nil {
+ in, out := &in.VSphere, &out.VSphere
+ *out = new(VSpherePlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudPlatformSpec)
+ **out = **in
+ }
+ if in.Kubevirt != nil {
+ in, out := &in.Kubevirt, &out.Kubevirt
+ *out = new(KubevirtPlatformSpec)
+ **out = **in
+ }
+ if in.EquinixMetal != nil {
+ in, out := &in.EquinixMetal, &out.EquinixMetal
+ *out = new(EquinixMetalPlatformSpec)
+ **out = **in
+ }
+ if in.PowerVS != nil {
+ in, out := &in.PowerVS, &out.PowerVS
+ *out = new(PowerVSPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AlibabaCloud != nil {
+ in, out := &in.AlibabaCloud, &out.AlibabaCloud
+ *out = new(AlibabaCloudPlatformSpec)
+ **out = **in
+ }
+ if in.Nutanix != nil {
+ in, out := &in.Nutanix, &out.Nutanix
+ *out = new(NutanixPlatformSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.External != nil {
+ in, out := &in.External, &out.External
+ *out = new(ExternalPlatformSpec)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec.
+func (in *PlatformSpec) DeepCopy() *PlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzurePlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new(GCPPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BareMetal != nil {
+ in, out := &in.BareMetal, &out.BareMetal
+ *out = new(BareMetalPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OpenStack != nil {
+ in, out := &in.OpenStack, &out.OpenStack
+ *out = new(OpenStackPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Ovirt != nil {
+ in, out := &in.Ovirt, &out.Ovirt
+ *out = new(OvirtPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VSphere != nil {
+ in, out := &in.VSphere, &out.VSphere
+ *out = new(VSpherePlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Kubevirt != nil {
+ in, out := &in.Kubevirt, &out.Kubevirt
+ *out = new(KubevirtPlatformStatus)
+ **out = **in
+ }
+ if in.EquinixMetal != nil {
+ in, out := &in.EquinixMetal, &out.EquinixMetal
+ *out = new(EquinixMetalPlatformStatus)
+ **out = **in
+ }
+ if in.PowerVS != nil {
+ in, out := &in.PowerVS, &out.PowerVS
+ *out = new(PowerVSPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AlibabaCloud != nil {
+ in, out := &in.AlibabaCloud, &out.AlibabaCloud
+ *out = new(AlibabaCloudPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Nutanix != nil {
+ in, out := &in.Nutanix, &out.Nutanix
+ *out = new(NutanixPlatformStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.External != nil {
+ in, out := &in.External, &out.External
+ *out = new(ExternalPlatformStatus)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
+func (in *PlatformStatus) DeepCopy() *PlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) {
+ *out = *in
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]PowerVSServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec.
+func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSPlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) {
+ *out = *in
+ if in.ServiceEndpoints != nil {
+ in, out := &in.ServiceEndpoints, &out.ServiceEndpoints
+ *out = make([]PowerVSServiceEndpoint, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus.
+func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSPlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint.
+func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSServiceEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimMapping) DeepCopyInto(out *PrefixedClaimMapping) {
+ *out = *in
+ out.TokenClaimMapping = in.TokenClaimMapping
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimMapping.
+func (in *PrefixedClaimMapping) DeepCopy() *PrefixedClaimMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(PrefixedClaimMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProfileCustomizations) DeepCopyInto(out *ProfileCustomizations) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileCustomizations.
+func (in *ProfileCustomizations) DeepCopy() *ProfileCustomizations {
+ if in == nil {
+ return nil
+ }
+ out := new(ProfileCustomizations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ out.ProjectRequestTemplate = in.ProjectRequestTemplate
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition.
+func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(PromQLClusterCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Proxy) DeepCopyInto(out *Proxy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy.
+func (in *Proxy) DeepCopy() *Proxy {
+ if in == nil {
+ return nil
+ }
+ out := new(Proxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Proxy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyList) DeepCopyInto(out *ProxyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Proxy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList.
+func (in *ProxyList) DeepCopy() *ProxyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProxyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
+ *out = *in
+ if in.ReadinessEndpoints != nil {
+ in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.TrustedCA = in.TrustedCA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
+func (in *ProxySpec) DeepCopy() *ProxySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
+func (in *ProxyStatus) DeepCopy() *ProxyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
+func (in *RegistryLocation) DeepCopy() *RegistryLocation {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryLocation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistrySources) DeepCopyInto(out *RegistrySources) {
+ *out = *in
+ if in.InsecureRegistries != nil {
+ in, out := &in.InsecureRegistries, &out.InsecureRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.BlockedRegistries != nil {
+ in, out := &in.BlockedRegistries, &out.BlockedRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AllowedRegistries != nil {
+ in, out := &in.AllowedRegistries, &out.AllowedRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ContainerRuntimeSearchRegistries != nil {
+ in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources.
+func (in *RegistrySources) DeepCopy() *RegistrySources {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistrySources)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Release) DeepCopyInto(out *Release) {
+ *out = *in
+ if in.Channels != nil {
+ in, out := &in.Channels, &out.Channels
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release.
+func (in *Release) DeepCopy() *Release {
+ if in == nil {
+ return nil
+ }
+ out := new(Release)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
+func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) {
+ *out = *in
+ if in.Mirrors != nil {
+ in, out := &in.Mirrors, &out.Mirrors
+ *out = make([]Mirror, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors.
+func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors {
+ if in == nil {
+ return nil
+ }
+ out := new(RepositoryDigestMirrors)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
+ *out = *in
+ out.ClientCA = in.ClientCA
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsernameHeaders != nil {
+ in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NameHeaders != nil {
+ in, out := &in.NameHeaders, &out.NameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EmailHeaders != nil {
+ in, out := &in.EmailHeaders, &out.EmailHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
+func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) {
+ *out = *in
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DomainPatterns != nil {
+ in, out := &in.DomainPatterns, &out.DomainPatterns
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.MaxAge.DeepCopyInto(&out.MaxAge)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy.
+func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(RequiredHSTSPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scheduler) DeepCopyInto(out *Scheduler) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler.
+func (in *Scheduler) DeepCopy() *Scheduler {
+ if in == nil {
+ return nil
+ }
+ out := new(Scheduler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scheduler) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerList) DeepCopyInto(out *SchedulerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Scheduler, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList.
+func (in *SchedulerList) DeepCopy() *SchedulerList {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SchedulerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) {
+ *out = *in
+ out.Policy = in.Policy
+ out.ProfileCustomizations = in.ProfileCustomizations
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec.
+func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus.
+func (in *SchedulerStatus) DeepCopy() *SchedulerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SchedulerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference.
+func (in *SecretNameReference) DeepCopy() *SecretNameReference {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretNameReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ if in.NamedCertificates != nil {
+ in, out := &in.NamedCertificates, &out.NamedCertificates
+ *out = make([]NamedCertificate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CipherSuites != nil {
+ in, out := &in.CipherSuites, &out.CipherSuites
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
+func (in *ServingInfo) DeepCopy() *ServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SignatureStore) DeepCopyInto(out *SignatureStore) {
+ *out = *in
+ out.CA = in.CA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureStore.
+func (in *SignatureStore) DeepCopy() *SignatureStore {
+ if in == nil {
+ return nil
+ }
+ out := new(SignatureStore)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSource) DeepCopyInto(out *StringSource) {
+ *out = *in
+ out.StringSourceSpec = in.StringSourceSpec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
+func (in *StringSource) DeepCopy() *StringSource {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
+func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) {
+ *out = *in
+ if in.Ciphers != nil {
+ in, out := &in.Ciphers, &out.Ciphers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec.
+func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSProfileSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) {
+ *out = *in
+ if in.Old != nil {
+ in, out := &in.Old, &out.Old
+ *out = new(OldTLSProfile)
+ **out = **in
+ }
+ if in.Intermediate != nil {
+ in, out := &in.Intermediate, &out.Intermediate
+ *out = new(IntermediateTLSProfile)
+ **out = **in
+ }
+ if in.Modern != nil {
+ in, out := &in.Modern, &out.Modern
+ *out = new(ModernTLSProfile)
+ **out = **in
+ }
+ if in.Custom != nil {
+ in, out := &in.Custom, &out.Custom
+ *out = new(CustomTLSProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile.
+func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSSecurityProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateReference) DeepCopyInto(out *TemplateReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference.
+func (in *TemplateReference) DeepCopy() *TemplateReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TestDetails) DeepCopyInto(out *TestDetails) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestDetails.
+func (in *TestDetails) DeepCopy() *TestDetails {
+ if in == nil {
+ return nil
+ }
+ out := new(TestDetails)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TestReporting) DeepCopyInto(out *TestReporting) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReporting.
+func (in *TestReporting) DeepCopy() *TestReporting {
+ if in == nil {
+ return nil
+ }
+ out := new(TestReporting)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TestReportingSpec) DeepCopyInto(out *TestReportingSpec) {
+ *out = *in
+ if in.TestsForFeatureGates != nil {
+ in, out := &in.TestsForFeatureGates, &out.TestsForFeatureGates
+ *out = make([]FeatureGateTests, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingSpec.
+func (in *TestReportingSpec) DeepCopy() *TestReportingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TestReportingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TestReportingStatus) DeepCopyInto(out *TestReportingStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestReportingStatus.
+func (in *TestReportingStatus) DeepCopy() *TestReportingStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TestReportingStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenClaimMapping) DeepCopyInto(out *TokenClaimMapping) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMapping.
+func (in *TokenClaimMapping) DeepCopy() *TokenClaimMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenClaimMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) {
+ *out = *in
+ in.Username.DeepCopyInto(&out.Username)
+ out.Groups = in.Groups
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimMappings.
+func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenClaimMappings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) {
+ *out = *in
+ if in.RequiredClaim != nil {
+ in, out := &in.RequiredClaim, &out.RequiredClaim
+ *out = new(TokenRequiredClaim)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimValidationRule.
+func (in *TokenClaimValidationRule) DeepCopy() *TokenClaimValidationRule {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenClaimValidationRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
+ *out = *in
+ if in.AccessTokenInactivityTimeout != nil {
+ in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
+func (in *TokenConfig) DeepCopy() *TokenConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenIssuer) DeepCopyInto(out *TokenIssuer) {
+ *out = *in
+ if in.Audiences != nil {
+ in, out := &in.Audiences, &out.Audiences
+ *out = make([]TokenAudience, len(*in))
+ copy(*out, *in)
+ }
+ out.CertificateAuthority = in.CertificateAuthority
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenIssuer.
+func (in *TokenIssuer) DeepCopy() *TokenIssuer {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenIssuer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequiredClaim) DeepCopyInto(out *TokenRequiredClaim) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequiredClaim.
+func (in *TokenRequiredClaim) DeepCopy() *TokenRequiredClaim {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenRequiredClaim)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Update) DeepCopyInto(out *Update) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
+func (in *Update) DeepCopy() *Update {
+ if in == nil {
+ return nil
+ }
+ out := new(Update)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
+ *out = *in
+ in.StartedTime.DeepCopyInto(&out.StartedTime)
+ if in.CompletionTime != nil {
+ in, out := &in.CompletionTime, &out.CompletionTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
+func (in *UpdateHistory) DeepCopy() *UpdateHistory {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateHistory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) {
+ *out = *in
+ out.TokenClaimMapping = in.TokenClaimMapping
+ if in.Prefix != nil {
+ in, out := &in.Prefix, &out.Prefix
+ *out = new(UsernamePrefix)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernameClaimMapping.
+func (in *UsernameClaimMapping) DeepCopy() *UsernameClaimMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(UsernameClaimMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UsernamePrefix) DeepCopyInto(out *UsernamePrefix) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsernamePrefix.
+func (in *UsernamePrefix) DeepCopy() *UsernamePrefix {
+ if in == nil {
+ return nil
+ }
+ out := new(UsernamePrefix)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) {
+ *out = *in
+ in.Topology.DeepCopyInto(&out.Topology)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformFailureDomainSpec.
+func (in *VSpherePlatformFailureDomainSpec) DeepCopy() *VSpherePlatformFailureDomainSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformFailureDomainSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformLoadBalancer) DeepCopyInto(out *VSpherePlatformLoadBalancer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformLoadBalancer.
+func (in *VSpherePlatformLoadBalancer) DeepCopy() *VSpherePlatformLoadBalancer {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformLoadBalancer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformNodeNetworking) DeepCopyInto(out *VSpherePlatformNodeNetworking) {
+ *out = *in
+ in.External.DeepCopyInto(&out.External)
+ in.Internal.DeepCopyInto(&out.Internal)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworking.
+func (in *VSpherePlatformNodeNetworking) DeepCopy() *VSpherePlatformNodeNetworking {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformNodeNetworking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformNodeNetworkingSpec) DeepCopyInto(out *VSpherePlatformNodeNetworkingSpec) {
+ *out = *in
+ if in.NetworkSubnetCIDR != nil {
+ in, out := &in.NetworkSubnetCIDR, &out.NetworkSubnetCIDR
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExcludeNetworkSubnetCIDR != nil {
+ in, out := &in.ExcludeNetworkSubnetCIDR, &out.ExcludeNetworkSubnetCIDR
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformNodeNetworkingSpec.
+func (in *VSpherePlatformNodeNetworkingSpec) DeepCopy() *VSpherePlatformNodeNetworkingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformNodeNetworkingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) {
+ *out = *in
+ if in.VCenters != nil {
+ in, out := &in.VCenters, &out.VCenters
+ *out = make([]VSpherePlatformVCenterSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailureDomains != nil {
+ in, out := &in.FailureDomains, &out.FailureDomains
+ *out = make([]VSpherePlatformFailureDomainSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.NodeNetworking.DeepCopyInto(&out.NodeNetworking)
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec.
+func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) {
+ *out = *in
+ if in.APIServerInternalIPs != nil {
+ in, out := &in.APIServerInternalIPs, &out.APIServerInternalIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IngressIPs != nil {
+ in, out := &in.IngressIPs, &out.IngressIPs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(VSpherePlatformLoadBalancer)
+ **out = **in
+ }
+ if in.MachineNetworks != nil {
+ in, out := &in.MachineNetworks, &out.MachineNetworks
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus.
+func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformTopology) DeepCopyInto(out *VSpherePlatformTopology) {
+ *out = *in
+ if in.Networks != nil {
+ in, out := &in.Networks, &out.Networks
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformTopology.
+func (in *VSpherePlatformTopology) DeepCopy() *VSpherePlatformTopology {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformTopology)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformVCenterSpec) DeepCopyInto(out *VSpherePlatformVCenterSpec) {
+ *out = *in
+ if in.Datacenters != nil {
+ in, out := &in.Datacenters, &out.Datacenters
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformVCenterSpec.
+func (in *VSpherePlatformVCenterSpec) DeepCopy() *VSpherePlatformVCenterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSpherePlatformVCenterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+ *out = *in
+ out.KubeConfig = in.KubeConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+ if in == nil {
+ return nil
+ }
+ out := new(WebhookTokenAuthenticator)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..59c4bbea6f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,491 @@
+apiservers.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: apiservers.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: APIServer
+ Labels: {}
+ PluralName: apiservers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+authentications.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: authentications.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ExternalOIDC
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Authentication
+ Labels: {}
+ PluralName: authentications
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+builds.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: builds.config.openshift.io
+ Capability: Build
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: openshift-controller-manager
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Build
+ Labels: {}
+ PluralName: builds
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+clusteroperators.config.openshift.io:
+ Annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/497
+ CRDName: clusteroperators.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: cluster-version-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_00"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ClusterOperator
+ Labels: {}
+ PluralName: clusteroperators
+ PrinterColumns:
+ - description: The version the operator is at.
+ jsonPath: .status.versions[?(@.name=="operator")].version
+ name: Version
+ type: string
+ - description: Whether the operator is running and stable.
+ jsonPath: .status.conditions[?(@.type=="Available")].status
+ name: Available
+ type: string
+ - description: Whether the operator is processing changes.
+ jsonPath: .status.conditions[?(@.type=="Progressing")].status
+ name: Progressing
+ type: string
+ - description: Whether the operator is degraded.
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status
+ name: Degraded
+ type: string
+ - description: The time the operator's Available status last changed.
+ jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime
+ name: Since
+ type: date
+ Scope: Cluster
+ ShortNames:
+ - co
+ TopLevelFeatureGates: []
+ Version: v1
+
+clusterversions.config.openshift.io:
+ Annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/495
+ CRDName: clusterversions.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - SignatureStores
+ FilenameOperatorName: cluster-version-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_00"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ClusterVersion
+ Labels: {}
+ PluralName: clusterversions
+ PrinterColumns:
+ - jsonPath: .status.history[?(@.state=="Completed")].version
+ name: Version
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Available")].status
+ name: Available
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Progressing")].status
+ name: Progressing
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
+ name: Since
+ type: date
+ - jsonPath: .status.conditions[?(@.type=="Progressing")].message
+ name: Status
+ type: string
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+consoles.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: consoles.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Console
+ Labels: {}
+ PluralName: consoles
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+dnses.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: dnses.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: DNS
+ Labels: {}
+ PluralName: dnses
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+featuregates.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: featuregates.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: FeatureGate
+ Labels: {}
+ PluralName: featuregates
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+images.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: images.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Image
+ Labels: {}
+ PluralName: images
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+imagecontentpolicies.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/874
+ CRDName: imagecontentpolicies.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ImageContentPolicy
+ Labels: {}
+ PluralName: imagecontentpolicies
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+imagedigestmirrorsets.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1126
+ CRDName: imagedigestmirrorsets.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ImageDigestMirrorSet
+ Labels: {}
+ PluralName: imagedigestmirrorsets
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames:
+ - idms
+ TopLevelFeatureGates: []
+ Version: v1
+
+imagetagmirrorsets.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1126
+ CRDName: imagetagmirrorsets.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ImageTagMirrorSet
+ Labels: {}
+ PluralName: imagetagmirrorsets
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames:
+ - itms
+ TopLevelFeatureGates: []
+ Version: v1
+
+infrastructures.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: infrastructures.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - BareMetalLoadBalancer
+ - GCPClusterHostedDNS
+ - GCPLabelsTags
+ - VSphereControlPlaneMachineSet
+ - VSphereMultiVCenters
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Infrastructure
+ Labels: {}
+ PluralName: infrastructures
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+ingresses.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: ingresses.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Ingress
+ Labels: {}
+ PluralName: ingresses
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+networks.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: networks.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - NetworkDiagnosticsConfig
+ - NetworkLiveMigration
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: false
+ KindName: Network
+ Labels: {}
+ PluralName: networks
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+nodes.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1107
+ CRDName: nodes.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Node
+ Labels: {}
+ PluralName: nodes
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+oauths.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: oauths.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: OAuth
+ Labels: {}
+ PluralName: oauths
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+operatorhubs.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: operatorhubs.config.openshift.io
+ Capability: marketplace
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: marketplace
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_03"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: OperatorHub
+ Labels: {}
+ PluralName: operatorhubs
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+projects.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: projects.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Project
+ Labels: {}
+ PluralName: projects
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+proxies.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: proxies.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_03"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Proxy
+ Labels: {}
+ PluralName: proxies
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+schedulers.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: schedulers.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - DynamicResourceAllocation
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Scheduler
+ Labels: {}
+ PluralName: schedulers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..fcb4fb9a42
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,2600 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AdmissionConfig = map[string]string{
+ "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
+ "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
+}
+
+func (AdmissionConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionConfig
+}
+
+var map_AdmissionPluginConfig = map[string]string{
+ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
+ "location": "Location is the path to a configuration file that contains the plugin's configuration",
+ "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+}
+
+func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionPluginConfig
+}
+
+var map_AuditConfig = map[string]string{
+ "": "AuditConfig holds configuration for the audit capabilities",
+ "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.",
+ "auditFilePath": "All requests coming to the apiserver will be logged to this file.",
+ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
+ "maximumRetainedFiles": "Maximum number of old log files to retain.",
+ "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
+ "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
+ "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+ "logFormat": "Format of saved audits (legacy or json).",
+ "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
+ "webHookMode": "Strategy for sending audit events (block or batch).",
+}
+
+func (AuditConfig) SwaggerDoc() map[string]string {
+ return map_AuditConfig
+}
+
+var map_CertInfo = map[string]string{
+ "": "CertInfo relates a certificate with a private key",
+ "certFile": "CertFile is a file containing a PEM-encoded certificate",
+ "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+}
+
+func (CertInfo) SwaggerDoc() map[string]string {
+ return map_CertInfo
+}
+
+var map_ClientConnectionOverrides = map[string]string{
+ "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
+ "contentType": "contentType is the content type used when sending data to the server from this client.",
+ "qps": "qps controls the number of queries per second allowed for this connection.",
+ "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.",
+}
+
+func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
+ return map_ClientConnectionOverrides
+}
+
+var map_ConfigMapFileReference = map[string]string{
+ "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+ "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.",
+}
+
+func (ConfigMapFileReference) SwaggerDoc() map[string]string {
+ return map_ConfigMapFileReference
+}
+
+var map_ConfigMapNameReference = map[string]string{
+ "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced config map",
+}
+
+func (ConfigMapNameReference) SwaggerDoc() map[string]string {
+ return map_ConfigMapNameReference
+}
+
+var map_DelegatedAuthentication = map[string]string{
+ "": "DelegatedAuthentication allows authentication to be disabled.",
+ "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
+}
+
+func (DelegatedAuthentication) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthentication
+}
+
+var map_DelegatedAuthorization = map[string]string{
+ "": "DelegatedAuthorization allows authorization to be disabled.",
+ "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
+}
+
+func (DelegatedAuthorization) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthorization
+}
+
+var map_EtcdConnectionInfo = map[string]string{
+ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
+ "urls": "URLs are the URLs for etcd",
+ "ca": "CA is a file containing trusted roots for the etcd server certificates",
+}
+
+func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
+ return map_EtcdConnectionInfo
+}
+
+var map_EtcdStorageConfig = map[string]string{
+ "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.",
+}
+
+func (EtcdStorageConfig) SwaggerDoc() map[string]string {
+ return map_EtcdStorageConfig
+}
+
+var map_GenericAPIServerConfig = map[string]string{
+ "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd",
+ "servingInfo": "servingInfo describes how to start serving",
+ "corsAllowedOrigins": "corsAllowedOrigins",
+ "auditConfig": "auditConfig describes how to configure audit information",
+ "storageConfig": "storageConfig contains information about how to use",
+ "admission": "admissionConfig holds information about how to configure admission.",
+}
+
+func (GenericAPIServerConfig) SwaggerDoc() map[string]string {
+ return map_GenericAPIServerConfig
+}
+
+var map_GenericControllerConfig = map[string]string{
+ "": "GenericControllerConfig provides information to configure a controller",
+ "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+ "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
+ "authentication": "authentication allows configuration of authentication for the endpoints",
+ "authorization": "authorization allows configuration of authentication for the endpoints",
+}
+
+func (GenericControllerConfig) SwaggerDoc() map[string]string {
+ return map_GenericControllerConfig
+}
+
+var map_HTTPServingInfo = map[string]string{
+ "": "HTTPServingInfo holds configuration for serving HTTP",
+ "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
+ "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
+}
+
+func (HTTPServingInfo) SwaggerDoc() map[string]string {
+ return map_HTTPServingInfo
+}
+
+var map_KubeClientConfig = map[string]string{
+ "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config",
+ "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.",
+}
+
+func (KubeClientConfig) SwaggerDoc() map[string]string {
+ return map_KubeClientConfig
+}
+
+var map_LeaderElection = map[string]string{
+ "": "LeaderElection provides information to elect a leader",
+ "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.",
+ "namespace": "namespace indicates which namespace the resource is in",
+ "name": "name indicates what name to use for the resource",
+ "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.",
+ "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.",
+ "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.",
+}
+
+func (LeaderElection) SwaggerDoc() map[string]string {
+ return map_LeaderElection
+}
+
+var map_MaxAgePolicy = map[string]string{
+ "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy",
+ "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced.",
+ "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced.",
+}
+
+func (MaxAgePolicy) SwaggerDoc() map[string]string {
+ return map_MaxAgePolicy
+}
+
+var map_NamedCertificate = map[string]string{
+ "": "NamedCertificate specifies a certificate/key, and the names it should be served for",
+ "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
+}
+
+func (NamedCertificate) SwaggerDoc() map[string]string {
+ return map_NamedCertificate
+}
+
+var map_RemoteConnectionInfo = map[string]string{
+ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
+ "url": "URL is the remote URL to connect to",
+ "ca": "CA is the CA for verifying TLS connections",
+}
+
+func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
+ return map_RemoteConnectionInfo
+}
+
+var map_RequiredHSTSPolicy = map[string]string{
+ "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.",
+ "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.",
+ "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.",
+ "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).",
+ "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com",
+}
+
+func (RequiredHSTSPolicy) SwaggerDoc() map[string]string {
+ return map_RequiredHSTSPolicy
+}
+
+var map_SecretNameReference = map[string]string{
+ "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced secret",
+}
+
+func (SecretNameReference) SwaggerDoc() map[string]string {
+ return map_SecretNameReference
+}
+
+var map_ServingInfo = map[string]string{
+ "": "ServingInfo holds information about serving web pages",
+ "bindAddress": "BindAddress is the ip:port to serve on",
+ "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+ "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
+ "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
+ "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
+ "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
+}
+
+func (ServingInfo) SwaggerDoc() map[string]string {
+ return map_ServingInfo
+}
+
+var map_StringSource = map[string]string{
+ "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
+}
+
+func (StringSource) SwaggerDoc() map[string]string {
+ return map_StringSource
+}
+
+var map_StringSourceSpec = map[string]string{
+ "": "StringSourceSpec specifies a string value, or external location",
+ "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
+ "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
+ "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
+ "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
+}
+
+func (StringSourceSpec) SwaggerDoc() map[string]string {
+ return map_StringSourceSpec
+}
+
+var map_APIServer = map[string]string{
+ "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (APIServer) SwaggerDoc() map[string]string {
+ return map_APIServer
+}
+
+var map_APIServerEncryption = map[string]string{
+ "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io",
+}
+
+func (APIServerEncryption) SwaggerDoc() map[string]string {
+ return map_APIServerEncryption
+}
+
+var map_APIServerList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (APIServerList) SwaggerDoc() map[string]string {
+ return map_APIServerList
+}
+
+var map_APIServerNamedServingCert = map[string]string{
+ "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.",
+ "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.",
+ "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.",
+}
+
+func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
+ return map_APIServerNamedServingCert
+}
+
+var map_APIServerServingCerts = map[string]string{
+ "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.",
+}
+
+func (APIServerServingCerts) SwaggerDoc() map[string]string {
+ return map_APIServerServingCerts
+}
+
+var map_APIServerSpec = map[string]string{
+ "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
+ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+ "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
+ "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.",
+ "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.",
+ "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.",
+}
+
+func (APIServerSpec) SwaggerDoc() map[string]string {
+ return map_APIServerSpec
+}
+
+var map_Audit = map[string]string{
+ "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.",
+ "customRules": "customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies.",
+}
+
+func (Audit) SwaggerDoc() map[string]string {
+ return map_Audit
+}
+
+var map_AuditCustomRule = map[string]string{
+ "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.",
+ "group": "group is a name of group a request user must be member of in order to this profile to apply.",
+ "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.",
+}
+
+func (AuditCustomRule) SwaggerDoc() map[string]string {
+ return map_AuditCustomRule
+}
+
+var map_Authentication = map[string]string{
+ "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+ return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+ return map_AuthenticationList
+}
+
+var map_AuthenticationSpec = map[string]string{
+ "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.",
+ "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.",
+ "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.",
+ "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".",
+ "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.",
+ "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.",
+}
+
+func (AuthenticationSpec) SwaggerDoc() map[string]string {
+ return map_AuthenticationSpec
+}
+
+var map_AuthenticationStatus = map[string]string{
+ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.",
+ "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+ return map_AuthenticationStatus
+}
+
+var map_DeprecatedWebhookTokenAuthenticator = map[string]string{
+ "": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.",
+ "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.",
+}
+
+func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+ return map_DeprecatedWebhookTokenAuthenticator
+}
+
+var map_OIDCClientConfig = map[string]string{
+ "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration",
+ "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration",
+ "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider",
+ "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field",
+ "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.",
+}
+
+func (OIDCClientConfig) SwaggerDoc() map[string]string {
+ return map_OIDCClientConfig
+}
+
+var map_OIDCClientReference = map[string]string{
+ "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`",
+ "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.",
+ "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider",
+}
+
+func (OIDCClientReference) SwaggerDoc() map[string]string {
+ return map_OIDCClientReference
+}
+
+var map_OIDCClientStatus = map[string]string{
+ "componentName": "ComponentName is the name of the component that will consume a client configuration.",
+ "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.",
+ "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.",
+ "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.",
+ "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.",
+}
+
+func (OIDCClientStatus) SwaggerDoc() map[string]string {
+ return map_OIDCClientStatus
+}
+
+var map_OIDCProvider = map[string]string{
+ "name": "Name of the OIDC provider",
+ "issuer": "Issuer describes atributes of the OIDC token issuer",
+ "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer",
+ "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity",
+ "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.",
+}
+
+func (OIDCProvider) SwaggerDoc() map[string]string {
+ return map_OIDCProvider
+}
+
+var map_PrefixedClaimMapping = map[string]string{
+ "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".",
+}
+
+func (PrefixedClaimMapping) SwaggerDoc() map[string]string {
+ return map_PrefixedClaimMapping
+}
+
+var map_TokenClaimMapping = map[string]string{
+ "claim": "Claim is a JWT token claim to be used in the mapping",
+}
+
+func (TokenClaimMapping) SwaggerDoc() map[string]string {
+ return map_TokenClaimMapping
+}
+
+var map_TokenClaimMappings = map[string]string{
+ "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"",
+ "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.",
+}
+
+func (TokenClaimMappings) SwaggerDoc() map[string]string {
+ return map_TokenClaimMappings
+}
+
+var map_TokenClaimValidationRule = map[string]string{
+ "type": "Type sets the type of the validation rule",
+ "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value",
+}
+
+func (TokenClaimValidationRule) SwaggerDoc() map[string]string {
+ return map_TokenClaimValidationRule
+}
+
+var map_TokenIssuer = map[string]string{
+ "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.",
+ "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.",
+ "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.",
+}
+
+func (TokenIssuer) SwaggerDoc() map[string]string {
+ return map_TokenIssuer
+}
+
+var map_TokenRequiredClaim = map[string]string{
+ "claim": "Claim is a name of a required claim. Only claims with string values are supported.",
+ "requiredValue": "RequiredValue is the required value for the claim.",
+}
+
+func (TokenRequiredClaim) SwaggerDoc() map[string]string {
+ return map_TokenRequiredClaim
+}
+
+var map_UsernameClaimMapping = map[string]string{
+ "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"",
+}
+
+func (UsernameClaimMapping) SwaggerDoc() map[string]string {
+ return map_UsernameClaimMapping
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator",
+ "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+ return map_WebhookTokenAuthenticator
+}
+
+var map_Build = map[string]string{
+ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds user-settable values for the build controller configuration",
+}
+
+func (Build) SwaggerDoc() map[string]string {
+ return map_Build
+}
+
+var map_BuildDefaults = map[string]string{
+ "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.",
+ "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.",
+ "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+ "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
+ "resources": "Resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaults) SwaggerDoc() map[string]string {
+ return map_BuildDefaults
+}
+
+var map_BuildList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (BuildList) SwaggerDoc() map[string]string {
+ return map_BuildList
+}
+
+var map_BuildOverrides = map[string]string{
+ "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+ "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node",
+ "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+ "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
+}
+
+func (BuildOverrides) SwaggerDoc() map[string]string {
+ return map_BuildOverrides
+}
+
+var map_BuildSpec = map[string]string{
+ "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.",
+ "buildDefaults": "BuildDefaults controls the default information for Builds",
+ "buildOverrides": "BuildOverrides controls override settings for builds",
+}
+
+func (BuildSpec) SwaggerDoc() map[string]string {
+ return map_BuildSpec
+}
+
+var map_ImageLabel = map[string]string{
+ "name": "Name defines the name of the label. It must have non-zero length.",
+ "value": "Value defines the literal value of the label.",
+}
+
+func (ImageLabel) SwaggerDoc() map[string]string {
+ return map_ImageLabel
+}
+
+var map_ClusterOperator = map[string]string{
+ "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds configuration that could apply to any operator.",
+ "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.",
+}
+
+func (ClusterOperator) SwaggerDoc() map[string]string {
+ return map_ClusterOperator
+}
+
+var map_ClusterOperatorList = map[string]string{
+ "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterOperatorList) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorList
+}
+
+// Generated swagger docs for ClusterOperator spec/status/condition types.
+var map_ClusterOperatorSpec = map[string]string{
+ "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".",
+}
+
+func (ClusterOperatorSpec) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorSpec
+}
+
+var map_ClusterOperatorStatus = map[string]string{
+ "": "ClusterOperatorStatus provides information about the status of the operator.",
+ "conditions": "conditions describes the state of the operator's managed and monitored components.",
+ "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.",
+ "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces",
+ "extension": "extension contains any additional status information specific to the operator which owns this status object.",
+}
+
+func (ClusterOperatorStatus) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorStatus
+}
+
+var map_ClusterOperatorStatusCondition = map[string]string{
+ "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.",
+ "type": "type specifies the aspect reported by this condition.",
+ "status": "status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.",
+ "reason": "reason is the CamelCase reason for the condition's current status.",
+ "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+}
+
+func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string {
+ return map_ClusterOperatorStatusCondition
+}
+
+// Generated swagger docs for ObjectReference, OperandVersion, ClusterCondition.
+var map_ObjectReference = map[string]string{
+ "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+ "group": "group of the referent.",
+ "resource": "resource of the referent.",
+ "namespace": "namespace of the referent.",
+ "name": "name of the referent.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+ return map_ObjectReference
+}
+
+var map_OperandVersion = map[string]string{
+ "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.",
+ "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0",
+}
+
+func (OperandVersion) SwaggerDoc() map[string]string {
+ return map_OperandVersion
+}
+
+var map_ClusterCondition = map[string]string{
+ "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.",
+ "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.",
+ "promql": "promQL represents a cluster condition based on PromQL.",
+}
+
+func (ClusterCondition) SwaggerDoc() map[string]string {
+ return map_ClusterCondition
+}
+
+// Generated swagger docs for ClusterVersion and its capabilities/list types.
+var map_ClusterVersion = map[string]string{
+ "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.",
+ "status": "status contains information about the available updates and any in-progress updates.",
+}
+
+func (ClusterVersion) SwaggerDoc() map[string]string {
+ return map_ClusterVersion
+}
+
+var map_ClusterVersionCapabilitiesSpec = map[string]string{
+ "": "ClusterVersionCapabilitiesSpec selects the managed set of optional, core cluster components.",
+ "baselineCapabilitySet": "baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.",
+ "additionalEnabledCapabilities": "additionalEnabledCapabilities extends the set of managed capabilities beyond the baseline defined in baselineCapabilitySet. The default is an empty set.",
+}
+
+func (ClusterVersionCapabilitiesSpec) SwaggerDoc() map[string]string {
+ return map_ClusterVersionCapabilitiesSpec
+}
+
+var map_ClusterVersionCapabilitiesStatus = map[string]string{
+ "": "ClusterVersionCapabilitiesStatus describes the state of optional, core cluster components.",
+ "enabledCapabilities": "enabledCapabilities lists all the capabilities that are currently managed.",
+ "knownCapabilities": "knownCapabilities lists all the capabilities known to the current cluster.",
+}
+
+func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string {
+ return map_ClusterVersionCapabilitiesStatus
+}
+
+var map_ClusterVersionList = map[string]string{
+ "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterVersionList) SwaggerDoc() map[string]string {
+ return map_ClusterVersionList
+}
+
+// map_ClusterVersionSpec documents ClusterVersionSpec fields for swagger.
+// NOTE(review): fixed user-facing typos ("will be contain" -> "will contain",
+// "is list of overides" -> "is a list of overrides"); these should also be
+// fixed upstream in openshift/api, since this vendored file is regenerated.
+var map_ClusterVersionSpec = map[string]string{
+ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.",
+ "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.",
+ "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. You should not do this. version is silently ignored and image is used. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
+ "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
+ "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.",
+ "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.",
+ "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.",
+ "overrides": "overrides is a list of overrides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
+}
+
+func (ClusterVersionSpec) SwaggerDoc() map[string]string {
+ return map_ClusterVersionSpec
+}
+
+// Generated swagger docs for ClusterVersionStatus.
+var map_ClusterVersionStatus = map[string]string{
+ "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.",
+ "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.",
+ "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.",
+ "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.",
+ "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.",
+ "capabilities": "capabilities describes the state of optional, core cluster components.",
+ "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.",
+ "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.",
+ "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.",
+}
+
+func (ClusterVersionStatus) SwaggerDoc() map[string]string {
+ return map_ClusterVersionStatus
+}
+
+// map_ComponentOverride documents ComponentOverride fields for swagger.
+// NOTE(review): fixed user-facing typo "indentifies" -> "identifies"; the
+// same fix should land upstream in openshift/api (vendored generated file).
+var map_ComponentOverride = map[string]string{
+ "": "ComponentOverride allows overriding cluster version operator's behavior for a component.",
+ "kind": "kind identifies which object to override.",
+ "group": "group identifies the API group that the kind is in.",
+ "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.",
+ "name": "name is the component's name.",
+ "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false",
+}
+
+func (ComponentOverride) SwaggerDoc() map[string]string {
+ return map_ComponentOverride
+}
+
+// Generated swagger docs for ConditionalUpdate and ConditionalUpdateRisk.
+var map_ConditionalUpdate = map[string]string{
+ "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.",
+ "release": "release is the target of the update.",
+ "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.",
+ "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Recommended, for whether the update is recommended for the current cluster.",
+}
+
+func (ConditionalUpdate) SwaggerDoc() map[string]string {
+ return map_ConditionalUpdate
+}
+
+var map_ConditionalUpdateRisk = map[string]string{
+ "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.",
+ "url": "url contains information about this risk.",
+ "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.",
+ "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+ "matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended.",
+}
+
+func (ConditionalUpdateRisk) SwaggerDoc() map[string]string {
+ return map_ConditionalUpdateRisk
+}
+
+// map_PromQLClusterCondition documents PromQLClusterCondition for swagger.
+// NOTE(review): removed duplicated word ("query query" -> "query"); the same
+// fix should land upstream in openshift/api (vendored generated file).
+var map_PromQLClusterCondition = map[string]string{
+ "": "PromQLClusterCondition represents a cluster condition based on PromQL.",
+ "promql": "PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.",
+}
+
+func (PromQLClusterCondition) SwaggerDoc() map[string]string {
+ return map_PromQLClusterCondition
+}
+
+// Generated swagger docs for Release and SignatureStore.
+var map_Release = map[string]string{
+ "": "Release represents an OpenShift release image and associated metadata.",
+ "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.",
+ "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
+ "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.",
+ "channels": "channels is the set of Cincinnati channels to which the release currently belongs.",
+}
+
+func (Release) SwaggerDoc() map[string]string {
+ return map_Release
+}
+
+var map_SignatureStore = map[string]string{
+ "": "SignatureStore represents the URL of custom Signature Store",
+ "url": "url contains the upstream custom signature store URL. url should be a valid absolute http/https URI of an upstream signature store as per rfc1738. This must be provided and cannot be empty.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the signature store is not honored. If the specified ca data is not valid, the signature store is not honored. If empty, we fall back to the CA configured via Proxy, which is appended to the default system roots. The namespace for this config map is openshift-config.",
+}
+
+func (SignatureStore) SwaggerDoc() map[string]string {
+ return map_SignatureStore
+}
+
+// map_Update documents Update (administrator update request) for swagger.
+// NOTE(review): fixed grammar "images that comes from" -> "images that come
+// from"; the same fix should land upstream in openshift/api.
+var map_Update = map[string]string{
+ "": "Update represents an administrator update request.",
+ "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.",
+ "version": "version is a semantic version identifying the update version. version is ignored if image is specified and required if architecture is specified.",
+ "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, version is ignored. When image is set, version should be empty. When image is set, architecture cannot be specified.",
+ "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.",
+}
+
+func (Update) SwaggerDoc() map[string]string {
+ return map_Update
+}
+
+// map_UpdateHistory documents UpdateHistory fields for swagger.
+// NOTE(review): fixed user-facing typos "menition" -> "mention" and
+// "overriden" -> "overridden"; the same fixes should land upstream in
+// openshift/api (vendored generated file).
+var map_UpdateHistory = map[string]string{
+ "": "UpdateHistory is a single attempted update to the cluster.",
+ "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).",
+ "startedTime": "startedTime is the time at which the update was started.",
+ "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
+ "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
+ "image": "image is a container image location that contains the update. This value is always populated.",
+ "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.",
+ "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.",
+}
+
+func (UpdateHistory) SwaggerDoc() map[string]string {
+ return map_UpdateHistory
+}
+
+// Generated swagger docs for the Console config resource family.
+var map_Console = map[string]string{
+ "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Console) SwaggerDoc() map[string]string {
+ return map_Console
+}
+
+var map_ConsoleAuthentication = map[string]string{
+ "": "ConsoleAuthentication defines a list of optional configuration for console authentication.",
+ "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
+}
+
+func (ConsoleAuthentication) SwaggerDoc() map[string]string {
+ return map_ConsoleAuthentication
+}
+
+var map_ConsoleList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConsoleList) SwaggerDoc() map[string]string {
+ return map_ConsoleList
+}
+
+var map_ConsoleSpec = map[string]string{
+ "": "ConsoleSpec is the specification of the desired behavior of the Console.",
+}
+
+func (ConsoleSpec) SwaggerDoc() map[string]string {
+ return map_ConsoleSpec
+}
+
+var map_ConsoleStatus = map[string]string{
+ "": "ConsoleStatus defines the observed status of the Console.",
+ "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.",
+}
+
+func (ConsoleStatus) SwaggerDoc() map[string]string {
+ return map_ConsoleStatus
+}
+
+// Generated swagger docs for the DNS config resource family.
+var map_AWSDNSSpec = map[string]string{
+ "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.",
+ "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.",
+}
+
+func (AWSDNSSpec) SwaggerDoc() map[string]string {
+ return map_AWSDNSSpec
+}
+
+var map_DNS = map[string]string{
+ "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (DNS) SwaggerDoc() map[string]string {
+ return map_DNS
+}
+
+var map_DNSList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (DNSList) SwaggerDoc() map[string]string {
+ return map_DNSList
+}
+
+var map_DNSPlatformSpec = map[string]string{
+ "": "DNSPlatformSpec holds cloud-provider-specific configuration for DNS administration.",
+ "type": "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\".\n\nIndividual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults.",
+ "aws": "aws contains DNS configuration specific to the Amazon Web Services cloud provider.",
+}
+
+func (DNSPlatformSpec) SwaggerDoc() map[string]string {
+ return map_DNSPlatformSpec
+}
+
+var map_DNSSpec = map[string]string{
+ "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.",
+ "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.",
+ "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.",
+ "platform": "platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
+}
+
+func (DNSSpec) SwaggerDoc() map[string]string {
+ return map_DNSSpec
+}
+
+var map_DNSZone = map[string]string{
+ "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.",
+ "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get",
+ "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options",
+}
+
+func (DNSZone) SwaggerDoc() map[string]string {
+ return map_DNSZone
+}
+
+// Generated swagger docs for the FeatureGate config resource family.
+var map_CustomFeatureGates = map[string]string{
+ "enabled": "enabled is a list of all feature gates that you want to force on",
+ "disabled": "disabled is a list of all feature gates that you want to force off",
+}
+
+func (CustomFeatureGates) SwaggerDoc() map[string]string {
+ return map_CustomFeatureGates
+}
+
+var map_FeatureGate = map[string]string{
+ "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (FeatureGate) SwaggerDoc() map[string]string {
+ return map_FeatureGate
+}
+
+var map_FeatureGateAttributes = map[string]string{
+ "name": "name is the name of the FeatureGate.",
+}
+
+func (FeatureGateAttributes) SwaggerDoc() map[string]string {
+ return map_FeatureGateAttributes
+}
+
+var map_FeatureGateDetails = map[string]string{
+ "version": "version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.",
+ "enabled": "enabled is a list of all feature gates that are enabled in the cluster for the named version.",
+ "disabled": "disabled is a list of all feature gates that are disabled in the cluster for the named version.",
+}
+
+func (FeatureGateDetails) SwaggerDoc() map[string]string {
+ return map_FeatureGateDetails
+}
+
+var map_FeatureGateList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (FeatureGateList) SwaggerDoc() map[string]string {
+ return map_FeatureGateList
+}
+
+var map_FeatureGateSelection = map[string]string{
+ "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.",
+ "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.",
+}
+
+func (FeatureGateSelection) SwaggerDoc() map[string]string {
+ return map_FeatureGateSelection
+}
+
+var map_FeatureGateStatus = map[string]string{
+ "conditions": "conditions represent the observations of the current state. Known .status.conditions.type are: \"DeterminationDegraded\"",
+ "featureGates": "featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list.",
+}
+
+func (FeatureGateStatus) SwaggerDoc() map[string]string {
+ return map_FeatureGateStatus
+}
+
+// Generated swagger docs for the Image config resource family.
+var map_Image = map[string]string{
+ "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+ return map_Image
+}
+
+var map_ImageList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+ return map_ImageList
+}
+
+var map_ImageSpec = map[string]string{
+ "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+ "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.",
+ "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.",
+}
+
+func (ImageSpec) SwaggerDoc() map[string]string {
+ return map_ImageSpec
+}
+
+var map_ImageStatus = map[string]string{
+ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+}
+
+func (ImageStatus) SwaggerDoc() map[string]string {
+ return map_ImageStatus
+}
+
+var map_RegistryLocation = map[string]string{
+ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
+ "domainName": "domainName specifies a domain name for the registry. In case the registry uses a non-standard (80 or 443) port, the port should be included in the domain name as well.",
+ "insecure": "insecure indicates whether the registry is secure (https) or insecure (http). By default (if not specified) the registry is assumed as secure.",
+}
+
+func (RegistryLocation) SwaggerDoc() map[string]string {
+ return map_RegistryLocation
+}
+
+var map_RegistrySources = map[string]string{
+ "": "RegistrySources holds cluster-wide information about how to handle the registries config.",
+ "insecureRegistries": "insecureRegistries are registries which do not have valid TLS certificates or only support HTTP connections.",
+ "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
+ "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e. CRI-O. Will NOT work with builds or imagestream imports.",
+}
+
+func (RegistrySources) SwaggerDoc() map[string]string {
+ return map_RegistrySources
+}
+
+var map_ImageContentPolicy = map[string]string{
+ "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+}
+
+func (ImageContentPolicy) SwaggerDoc() map[string]string {
+ return map_ImageContentPolicy
+}
+
+var map_ImageContentPolicyList = map[string]string{
+ "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageContentPolicyList) SwaggerDoc() map[string]string {
+ return map_ImageContentPolicyList
+}
+
+var map_ImageContentPolicySpec = map[string]string{
+ "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.",
+ "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull image from mirrors by tags, should set the \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.",
+}
+
+func (ImageContentPolicySpec) SwaggerDoc() map[string]string {
+ return map_ImageContentPolicySpec
+}
+
+var map_RepositoryDigestMirrors = map[string]string{
+ "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.",
+ "source": "source is the repository that users refer to, e.g. in image pull specifications.",
+ "allowMirrorByTags": "allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.",
+ "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.",
+}
+
+func (RepositoryDigestMirrors) SwaggerDoc() map[string]string {
+ return map_RepositoryDigestMirrors
+}
+
+var map_ImageDigestMirrorSet = map[string]string{
+ "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status contains the observed state of the resource.",
+}
+
+func (ImageDigestMirrorSet) SwaggerDoc() map[string]string {
+ return map_ImageDigestMirrorSet
+}
+
+var map_ImageDigestMirrorSetList = map[string]string{
+ "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string {
+ return map_ImageDigestMirrorSetList
+}
+
+var map_ImageDigestMirrorSetSpec = map[string]string{
+ "": "ImageDigestMirrorSetSpec is the specification of the ImageDigestMirrorSet CRD.",
+ "imageDigestMirrors": "imageDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using tag specification, users should configure a list of mirrors using \"ImageTagMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagedigestmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a specific order of mirrors, should configure them into one list of mirrors using the expected order.",
+}
+
+func (ImageDigestMirrorSetSpec) SwaggerDoc() map[string]string {
+ return map_ImageDigestMirrorSetSpec
+}
+
+var map_ImageDigestMirrors = map[string]string{
+ "": "ImageDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.",
+ "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table",
+ "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their digests. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\" Other cluster configuration, including (but not limited to) other imageDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table",
+ "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list.",
+}
+
+func (ImageDigestMirrors) SwaggerDoc() map[string]string {
+ return map_ImageDigestMirrors
+}
+
+var map_ImageTagMirrorSet = map[string]string{
+ "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status contains the observed state of the resource.",
+}
+
+func (ImageTagMirrorSet) SwaggerDoc() map[string]string {
+ return map_ImageTagMirrorSet
+}
+
+var map_ImageTagMirrorSetList = map[string]string{
+ "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageTagMirrorSetList) SwaggerDoc() map[string]string {
+ return map_ImageTagMirrorSetList
+}
+
+var map_ImageTagMirrorSetSpec = map[string]string{
+ "": "ImageTagMirrorSetSpec is the specification of the ImageTagMirrorSet CRD.",
+ "imageTagMirrors": "imageTagMirrors allows images referenced by image tags in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in imageTagMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To use mirrors to pull images using digest specification only, users should configure a list of mirrors using \"ImageDigestMirrorSet\" CRD.\n\nIf the image pull specification matches the repository of \"source\" in multiple imagetagmirrorset objects, only the objects which define the most specific namespace match will be used. For example, if there are objects using quay.io/libpod and quay.io/libpod/busybox as the \"source\", only the objects using quay.io/libpod/busybox are going to apply for pull specification quay.io/libpod/busybox. Each “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. Users who want to use a deterministic order of mirrors, should configure them into one list of mirrors using the expected order.",
+}
+
+func (ImageTagMirrorSetSpec) SwaggerDoc() map[string]string {
+ return map_ImageTagMirrorSetSpec
+}
+
+var map_ImageTagMirrors = map[string]string{
+ "": "ImageTagMirrors holds cluster-wide information about how to handle mirrors in the registries config.",
+ "source": "source matches the repository that users refer to, e.g. in image pull specifications. Setting source to a registry hostname e.g. docker.io, quay.io, or registry.redhat.io, will match the image pull specification of corresponding registry. \"source\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo [*.]host for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table",
+ "mirrors": "mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. Images can be pulled from these mirrors only if they are referenced by their tags. The mirrored location is obtained by replacing the part of the input reference that matches source by the mirrors entry, e.g. for registry.redhat.io/product/repo reference, a (source, mirror) pair *.redhat.io, mirror.local/redhat causes a mirror.local/redhat/product/repo repository to be used. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Configuring a list of mirrors using \"ImageDigestMirrorSet\" CRD and forcing digest-pulls for mirrors avoids that issue. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. If no mirror is specified or all image pulls from the mirror list fail, the image will continue to be pulled from the repository in the pull spec unless explicitly prohibited by \"mirrorSourcePolicy\". Other cluster configuration, including (but not limited to) other imageTagMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. \"mirrors\" uses one of the following formats: host[:port] host[:port]/namespace[/namespace…] host[:port]/namespace[/namespace…]/repo for more information about the format, see the document about the location field: https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table",
+ "mirrorSourcePolicy": "mirrorSourcePolicy defines the fallback policy if fails to pull image from the mirrors. If unset, the image will continue to be pulled from the repository in the pull spec. sourcePolicy is valid configuration only when one or more mirrors are in the mirror list.",
+}
+
+func (ImageTagMirrors) SwaggerDoc() map[string]string {
+ return map_ImageTagMirrors
+}
+
+var map_AWSPlatformSpec = map[string]string{
+ "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.",
+}
+
+func (AWSPlatformSpec) SwaggerDoc() map[string]string {
+ return map_AWSPlatformSpec
+}
+
+var map_AWSPlatformStatus = map[string]string{
+ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
+ "region": "region holds the default AWS region for new AWS resources created by the cluster.",
+ "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.",
+ "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.",
+}
+
+func (AWSPlatformStatus) SwaggerDoc() map[string]string {
+ return map_AWSPlatformStatus
+}
+
+var map_AWSResourceTag = map[string]string{
+ "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.",
+ "key": "key is the key of the tag",
+ "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.",
+}
+
+func (AWSResourceTag) SwaggerDoc() map[string]string {
+ return map_AWSResourceTag
+}
+
+var map_AWSServiceEndpoint = map[string]string{
+ "": "AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services.",
+ "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.",
+ "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.",
+}
+
+func (AWSServiceEndpoint) SwaggerDoc() map[string]string {
+ return map_AWSServiceEndpoint
+}
+
+var map_AlibabaCloudPlatformSpec = map[string]string{
+ "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudPlatformSpec
+}
+
+var map_AlibabaCloudPlatformStatus = map[string]string{
+ "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.",
+ "region": "region specifies the region for Alibaba Cloud resources created for the cluster.",
+ "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.",
+ "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.",
+}
+
+func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudPlatformStatus
+}
+
+var map_AlibabaCloudResourceTag = map[string]string{
+ "": "AlibabaCloudResourceTag is the set of tags to add to apply to resources.",
+ "key": "key is the key of the tag.",
+ "value": "value is the value of the tag.",
+}
+
+func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudResourceTag
+}
+
+var map_AzurePlatformSpec = map[string]string{
+ "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (AzurePlatformSpec) SwaggerDoc() map[string]string {
+ return map_AzurePlatformSpec
+}
+
+var map_AzurePlatformStatus = map[string]string{
+ "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
+ "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+ "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.",
+ "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.",
+ "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.",
+ "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.",
+}
+
+func (AzurePlatformStatus) SwaggerDoc() map[string]string {
+ return map_AzurePlatformStatus
+}
+
+var map_AzureResourceTag = map[string]string{
+ "": "AzureResourceTag is a tag to apply to Azure resources created for the cluster.",
+ "key": "key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`.",
+ "value": "value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.",
+}
+
+func (AzureResourceTag) SwaggerDoc() map[string]string {
+ return map_AzureResourceTag
+}
+
+var map_BareMetalPlatformLoadBalancer = map[string]string{
+ "": "BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.",
+ "type": "type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
+}
+
+func (BareMetalPlatformLoadBalancer) SwaggerDoc() map[string]string {
+ return map_BareMetalPlatformLoadBalancer
+}
+
+var map_BareMetalPlatformSpec = map[string]string{
+ "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".",
+}
+
+func (BareMetalPlatformSpec) SwaggerDoc() map[string]string {
+ return map_BareMetalPlatformSpec
+}
+
+var map_BareMetalPlatformStatus = map[string]string{
+ "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.",
+}
+
+func (BareMetalPlatformStatus) SwaggerDoc() map[string]string {
+ return map_BareMetalPlatformStatus
+}
+
+var map_CloudControllerManagerStatus = map[string]string{
+ "": "CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings",
+ "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nValid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected.",
+}
+
+func (CloudControllerManagerStatus) SwaggerDoc() map[string]string {
+ return map_CloudControllerManagerStatus
+}
+
+var map_CloudLoadBalancerConfig = map[string]string{
+ "": "CloudLoadBalancerConfig contains a union discriminator indicating the type of DNS solution in use within the cluster. When the DNSType is `ClusterHosted`, the cloud's Load Balancer configuration needs to be provided so that the DNS solution hosted within the cluster can be configured with those values.",
+ "dnsType": "dnsType indicates the type of DNS solution in use within the cluster. Its default value of `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. The cluster's use of the cloud's Load Balancers is unaffected by this setting. The value is immutable after it has been set at install time. Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. Enabling this functionality allows the user to start their own DNS solution outside the cluster after installation is complete. The customer would be responsible for configuring this custom DNS solution, and it can be run in addition to the in-cluster DNS solution.",
+ "clusterHosted": "clusterHosted holds the IP addresses of API, API-Int and Ingress Load Balancers on Cloud Platforms. The DNS solution hosted within the cluster use these IP addresses to provide resolution for API, API-Int and Ingress services.",
+}
+
+func (CloudLoadBalancerConfig) SwaggerDoc() map[string]string {
+ return map_CloudLoadBalancerConfig
+}
+
+var map_CloudLoadBalancerIPs = map[string]string{
+ "": "CloudLoadBalancerIPs contains the Load Balancer IPs for the cloud's API, API-Int and Ingress Load balancers. They will be populated as soon as the respective Load Balancers have been configured. These values are utilized to configure the DNS solution hosted within the cluster.",
+ "apiIntLoadBalancerIPs": "apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the apiIntLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.",
+ "apiLoadBalancerIPs": "apiLoadBalancerIPs holds Load Balancer IPs for the API service. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Could be empty for private clusters. Entries in the apiLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.",
+ "ingressLoadBalancerIPs": "ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. Entries in the ingressLoadBalancerIPs must be unique. A maximum of 16 IP addresses are permitted.",
+}
+
+func (CloudLoadBalancerIPs) SwaggerDoc() map[string]string {
+ return map_CloudLoadBalancerIPs
+}
+
+var map_EquinixMetalPlatformSpec = map[string]string{
+ "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string {
+ return map_EquinixMetalPlatformSpec
+}
+
+var map_EquinixMetalPlatformStatus = map[string]string{
+ "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+}
+
+func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string {
+ return map_EquinixMetalPlatformStatus
+}
+
+var map_ExternalPlatformSpec = map[string]string{
+ "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.",
+ "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.",
+}
+
+func (ExternalPlatformSpec) SwaggerDoc() map[string]string {
+ return map_ExternalPlatformSpec
+}
+
+var map_ExternalPlatformStatus = map[string]string{
+ "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.",
+ "cloudControllerManager": "cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected.",
+}
+
+func (ExternalPlatformStatus) SwaggerDoc() map[string]string {
+ return map_ExternalPlatformStatus
+}
+
+var map_GCPPlatformSpec = map[string]string{
+ "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (GCPPlatformSpec) SwaggerDoc() map[string]string {
+ return map_GCPPlatformSpec
+}
+
+var map_GCPPlatformStatus = map[string]string{
+ "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.",
+ "projectID": "projectID is the Project ID for new GCP resources created for the cluster.",
+ "region": "region holds the region for new GCP resources created for the cluster.",
+ "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.",
+ "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.",
+ "cloudLoadBalancerConfig": "cloudLoadBalancerConfig is a union that contains the IP addresses of API, API-Int and Ingress Load Balancers created on the cloud platform. These values would not be populated on on-prem platforms. These Load Balancer IPs are used to configure the in-cluster DNS instances for API, API-Int and Ingress services. `dnsType` is expected to be set to `ClusterHosted` when these Load Balancer IP addresses are populated and used.",
+}
+
+func (GCPPlatformStatus) SwaggerDoc() map[string]string {
+ return map_GCPPlatformStatus
+}
+
+var map_GCPResourceLabel = map[string]string{
+ "": "GCPResourceLabel is a label to apply to GCP resources created for the cluster.",
+ "key": "key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`.",
+ "value": "value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.",
+}
+
+func (GCPResourceLabel) SwaggerDoc() map[string]string {
+ return map_GCPResourceLabel
+}
+
+var map_GCPResourceTag = map[string]string{
+ "": "GCPResourceTag is a tag to apply to GCP resources created for the cluster.",
+ "parentID": "parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.",
+ "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.",
+ "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.",
+}
+
+func (GCPResourceTag) SwaggerDoc() map[string]string {
+ return map_GCPResourceTag
+}
+
+var map_IBMCloudPlatformSpec = map[string]string{
+ "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string {
+ return map_IBMCloudPlatformSpec
+}
+
+var map_IBMCloudPlatformStatus = map[string]string{
+ "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.",
+ "location": "Location is where the cluster has been deployed",
+ "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.",
+ "providerType": "ProviderType indicates the type of cluster that was created",
+ "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
+ "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
+ "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.",
+}
+
+func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string {
+ return map_IBMCloudPlatformStatus
+}
+
+var map_IBMCloudServiceEndpoint = map[string]string{
+ "": "IBMCloudServiceEndpoint stores the configuration of a custom url to override existing defaults of IBM Cloud Services.",
+ "name": "name is the name of the IBM Cloud service. Possible values are: CIS, COS, DNSServices, GlobalSearch, GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, ResourceManager, or VPC. For example, the IBM Cloud Private IAM service could be configured with the service `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`",
+ "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.",
+}
+
+func (IBMCloudServiceEndpoint) SwaggerDoc() map[string]string {
+ return map_IBMCloudServiceEndpoint
+}
+
+var map_Infrastructure = map[string]string{
+ "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Infrastructure) SwaggerDoc() map[string]string {
+ return map_Infrastructure
+}
+
+var map_InfrastructureList = map[string]string{
+ "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (InfrastructureList) SwaggerDoc() map[string]string {
+ return map_InfrastructureList
+}
+
+var map_InfrastructureSpec = map[string]string{
+ "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.",
+ "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.",
+ "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.",
+}
+
+func (InfrastructureSpec) SwaggerDoc() map[string]string {
+ return map_InfrastructureSpec
+}
+
+var map_InfrastructureStatus = map[string]string{
+ "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
+ "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
+ "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
+ "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.",
+ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.",
+ "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
+ "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
+ "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.",
+ "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.",
+ "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be setup with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API.",
+}
+
+func (InfrastructureStatus) SwaggerDoc() map[string]string {
+ return map_InfrastructureStatus
+}
+
+var map_KubevirtPlatformSpec = map[string]string{
+ "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (KubevirtPlatformSpec) SwaggerDoc() map[string]string {
+ return map_KubevirtPlatformSpec
+}
+
+var map_KubevirtPlatformStatus = map[string]string{
+ "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.",
+}
+
+func (KubevirtPlatformStatus) SwaggerDoc() map[string]string {
+ return map_KubevirtPlatformStatus
+}
+
+var map_NutanixFailureDomain = map[string]string{
+ "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.",
+ "name": "name defines the unique name of a failure domain. Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.",
+ "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+ "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+}
+
+func (NutanixFailureDomain) SwaggerDoc() map[string]string {
+ return map_NutanixFailureDomain
+}
+
+var map_NutanixPlatformLoadBalancer = map[string]string{
+ "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.",
+ "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
+}
+
+func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string {
+ return map_NutanixPlatformLoadBalancer
+}
+
+var map_NutanixPlatformSpec = map[string]string{
+ "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.",
+ "prismElements": "prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central.",
+ "failureDomains": "failureDomains configures failure domains information for the Nutanix platform. When set, the failure domains defined here may be used to spread Machines across prism element clusters to improve fault tolerance of the cluster.",
+}
+
+func (NutanixPlatformSpec) SwaggerDoc() map[string]string {
+ return map_NutanixPlatformSpec
+}
+
+var map_NutanixPlatformStatus = map[string]string{
+ "": "NutanixPlatformStatus holds the current status of the Nutanix infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+}
+
+func (NutanixPlatformStatus) SwaggerDoc() map[string]string {
+ return map_NutanixPlatformStatus
+}
+
+var map_NutanixPrismElementEndpoint = map[string]string{
+ "": "NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster)",
+ "name": "name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc).",
+ "endpoint": "endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.",
+}
+
+func (NutanixPrismElementEndpoint) SwaggerDoc() map[string]string {
+ return map_NutanixPrismElementEndpoint
+}
+
+var map_NutanixPrismEndpoint = map[string]string{
+ "": "NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster)",
+ "address": "address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster)",
+ "port": "port is the port number to access the Nutanix Prism Central or Element (cluster)",
+}
+
+func (NutanixPrismEndpoint) SwaggerDoc() map[string]string {
+ return map_NutanixPrismEndpoint
+}
+
+var map_NutanixResourceIdentifier = map[string]string{
+ "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)",
+ "type": "type is the identifier type to use for this resource.",
+ "uuid": "uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID.",
+ "name": "name is the resource name in the PC. It cannot be empty if the type is Name.",
+}
+
+func (NutanixResourceIdentifier) SwaggerDoc() map[string]string {
+ return map_NutanixResourceIdentifier
+}
+
+var map_OpenStackPlatformLoadBalancer = map[string]string{
+ "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.",
+ "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
+}
+
+func (OpenStackPlatformLoadBalancer) SwaggerDoc() map[string]string {
+ return map_OpenStackPlatformLoadBalancer
+}
+
+var map_OpenStackPlatformSpec = map[string]string{
+ "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".",
+}
+
+func (OpenStackPlatformSpec) SwaggerDoc() map[string]string {
+ return map_OpenStackPlatformSpec
+}
+
+var map_OpenStackPlatformStatus = map[string]string{
+ "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.",
+}
+
+func (OpenStackPlatformStatus) SwaggerDoc() map[string]string {
+ return map_OpenStackPlatformStatus
+}
+
+var map_OvirtPlatformLoadBalancer = map[string]string{
+ "": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.",
+ "type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
+}
+
+func (OvirtPlatformLoadBalancer) SwaggerDoc() map[string]string {
+ return map_OvirtPlatformLoadBalancer
+}
+
+var map_OvirtPlatformSpec = map[string]string{
+ "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.",
+}
+
+func (OvirtPlatformSpec) SwaggerDoc() map[string]string {
+ return map_OvirtPlatformSpec
+}
+
+var map_OvirtPlatformStatus = map[string]string{
+ "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+}
+
+func (OvirtPlatformStatus) SwaggerDoc() map[string]string {
+ return map_OvirtPlatformStatus
+}
+
+var map_PlatformSpec = map[string]string{
+ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
+ "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+ "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
+ "azure": "Azure contains settings specific to the Azure infrastructure provider.",
+ "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
+ "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
+ "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
+ "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
+ "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
+ "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
+ "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
+ "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.",
+ "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
+ "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
+ "external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.",
+}
+
+func (PlatformSpec) SwaggerDoc() map[string]string {
+ return map_PlatformSpec
+}
+
+var map_PlatformStatus = map[string]string{
+ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.",
+ "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.",
+ "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
+ "azure": "Azure contains settings specific to the Azure infrastructure provider.",
+ "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
+ "baremetal": "BareMetal contains settings specific to the BareMetal platform.",
+ "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.",
+ "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.",
+ "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.",
+ "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.",
+ "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.",
+ "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.",
+ "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.",
+ "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.",
+ "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.",
+ "external": "External contains settings specific to the generic External infrastructure provider.",
+}
+
+func (PlatformStatus) SwaggerDoc() map[string]string {
+ return map_PlatformStatus
+}
+
+var map_PowerVSPlatformSpec = map[string]string{
+ "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.",
+}
+
+func (PowerVSPlatformSpec) SwaggerDoc() map[string]string {
+ return map_PowerVSPlatformSpec
+}
+
+var map_PowerVSPlatformStatus = map[string]string{
+ "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.",
+ "region": "region holds the default Power VS region for new Power VS resources created by the cluster.",
+ "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported",
+ "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.",
+ "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.",
+ "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
+ "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
+}
+
+func (PowerVSPlatformStatus) SwaggerDoc() map[string]string {
+ return map_PowerVSPlatformStatus
+}
+
+var map_PowerVSServiceEndpoint = map[string]string{
+ "": "PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services.",
+ "name": "name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud",
+ "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.",
+}
+
+func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string {
+ return map_PowerVSServiceEndpoint
+}
+
+var map_VSpherePlatformFailureDomainSpec = map[string]string{
+ "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.",
+ "name": "name defines the arbitrary but unique name of a failure domain.",
+ "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.",
+ "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.",
+ "server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
+ "topology": "Topology describes a given failure domain using vSphere constructs",
+}
+
+func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformFailureDomainSpec
+}
+
+var map_VSpherePlatformLoadBalancer = map[string]string{
+ "": "VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.",
+ "type": "type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.",
+}
+
+func (VSpherePlatformLoadBalancer) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformLoadBalancer
+}
+
+var map_VSpherePlatformNodeNetworking = map[string]string{
+ "": "VSpherePlatformNodeNetworking holds the external and internal node networking spec.",
+ "external": "external represents the network configuration of the node that is externally routable.",
+ "internal": "internal represents the network configuration of the node that is routable only within the cluster.",
+}
+
+func (VSpherePlatformNodeNetworking) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformNodeNetworking
+}
+
+var map_VSpherePlatformNodeNetworkingSpec = map[string]string{
+ "": "VSpherePlatformNodeNetworkingSpec holds the network CIDR(s) and port group name for including and excluding IP ranges in the cloud provider. This would be used for example when multiple network adapters are attached to a guest to help determine which IP address the cloud config manager should use for the external and internal node networking.",
+ "networkSubnetCidr": "networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields.",
+ "network": "network VirtualMachine's VM Network names that will be used when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'`",
+ "excludeNetworkSubnetCidr": "excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields.",
+}
+
+func (VSpherePlatformNodeNetworkingSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformNodeNetworkingSpec
+}
+
+var map_VSpherePlatformSpec = map[string]string{
+ "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. In the future the cloud provider operator, storage operator and machine operator will use these fields for configuration.",
+ "vcenters": "vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported.",
+ "failureDomains": "failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used.",
+ "nodeNetworking": "nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.apiServerInternalIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IP addresses, one from IPv4 family and one from IPv6. In single stack clusters a single IP address is expected. When omitted, values from the status.ingressIPs will be used. Once set, the list cannot be completely removed (but its second entry can).",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided in the CIDR format and should be IPv4 or IPv6, for example \"10.0.0.0/8\" or \"fd00::/8\".",
+}
+
+func (VSpherePlatformSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformSpec
+}
+
+var map_VSpherePlatformStatus = map[string]string{
+ "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.",
+ "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.\n\nDeprecated: Use APIServerInternalIPs instead.",
+ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.",
+ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.",
+ "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.",
+ "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.",
+ "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.",
+ "machineNetworks": "machineNetworks are IP networks used to connect all the OpenShift cluster nodes.",
+}
+
+func (VSpherePlatformStatus) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformStatus
+}
+
+var map_VSpherePlatformTopology = map[string]string{
+ "": "VSpherePlatformTopology holds the required and optional vCenter objects - datacenter, computeCluster, networks, datastore and resourcePool - to provision virtual machines.",
+ "datacenter": "datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters.",
+ "computeCluster": "computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form //host/. The maximum length of the path is 2048 characters.",
+ "networks": "networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form //network/.",
+ "datastore": "datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters.",
+ "resourcePool": "resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters.",
+ "folder": "folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters.",
+ "template": "template is the full inventory path of the virtual machine or template that will be cloned when creating new machines in this failure domain. The maximum length of the path is 2048 characters.\n\nWhen omitted, the template will be calculated by the control plane machineset operator based on the region and zone defined in VSpherePlatformFailureDomainSpec. For example, for zone=zonea, region=region1, and infrastructure name=test, the template path would be calculated as //vm/test-rhcos-region1-zonea.",
+}
+
+func (VSpherePlatformTopology) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformTopology
+}
+
+var map_VSpherePlatformVCenterSpec = map[string]string{
+ "": "VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM.",
+ "server": "server is the fully-qualified domain name or the IP address of the vCenter server.",
+ "port": "port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time.",
+ "datacenters": "The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology.",
+}
+
+func (VSpherePlatformVCenterSpec) SwaggerDoc() map[string]string {
+ return map_VSpherePlatformVCenterSpec
+}
+
+var map_AWSIngressSpec = map[string]string{
+ "": "AWSIngressSpec holds the desired state of the Ingress for Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.",
+ "type": "type allows user to set a load balancer type. When this field is set the default ingresscontroller will get created using the specified LBType. If this field is not set then the default ingress controller of LBType Classic will be created. Valid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb",
+}
+
+func (AWSIngressSpec) SwaggerDoc() map[string]string {
+ return map_AWSIngressSpec
+}
+
+var map_ComponentRouteSpec = map[string]string{
+ "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.",
+ "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
+ "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.",
+ "hostname": "hostname is the hostname that should be used by the route.",
+ "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.",
+}
+
+func (ComponentRouteSpec) SwaggerDoc() map[string]string {
+ return map_ComponentRouteSpec
+}
+
+var map_ComponentRouteStatus = map[string]string{
+ "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.",
+ "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
+ "name": "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.",
+ "defaultHostname": "defaultHostname is the hostname of this route prior to customization.",
+ "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.",
+ "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.",
+ "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.",
+ "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.",
+}
+
+func (ComponentRouteStatus) SwaggerDoc() map[string]string {
+ return map_ComponentRouteStatus
+}
+
+var map_Ingress = map[string]string{
+ "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Ingress) SwaggerDoc() map[string]string {
+ return map_Ingress
+}
+
+var map_IngressList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (IngressList) SwaggerDoc() map[string]string {
+ return map_IngressList
+}
+
+var map_IngressPlatformSpec = map[string]string{
+ "": "IngressPlatformSpec holds the desired state of Ingress specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.",
+ "type": "type is the underlying infrastructure provider for the cluster. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+ "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.",
+}
+
+func (IngressPlatformSpec) SwaggerDoc() map[string]string {
+ return map_IngressPlatformSpec
+}
+
+var map_IngressSpec = map[string]string{
+ "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\".\n\nOnce set, changing domain is not currently supported.",
+ "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate.",
+ "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.",
+ "requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.",
+ "loadBalancer": "loadBalancer contains the load balancer details in general which are not only specific to the underlying infrastructure provider of the current cluster and are required for Ingress Controller to work on OpenShift.",
+}
+
+func (IngressSpec) SwaggerDoc() map[string]string {
+ return map_IngressSpec
+}
+
+var map_IngressStatus = map[string]string{
+ "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.",
+ "defaultPlacement": "defaultPlacement is set at installation time to control which nodes will host the ingress router pods by default. The options are control-plane nodes or worker nodes.\n\nThis field works by dictating how the Cluster Ingress Operator will consider unset replicas and nodePlacement fields in IngressController resources when creating the corresponding Deployments.\n\nSee the documentation for the IngressController replicas and nodePlacement fields for more information.\n\nWhen omitted, the default value is Workers",
+}
+
+func (IngressStatus) SwaggerDoc() map[string]string {
+ return map_IngressStatus
+}
+
+var map_LoadBalancer = map[string]string{
+ "platform": "platform holds configuration specific to the underlying infrastructure provider for the ingress load balancers. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
+}
+
+func (LoadBalancer) SwaggerDoc() map[string]string {
+ return map_LoadBalancer
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
+ "cidr": "The complete block for pod IPs.",
+ "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_ExternalIPConfig = map[string]string{
+ "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
+ "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.",
+ "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
+}
+
+func (ExternalIPConfig) SwaggerDoc() map[string]string {
+ return map_ExternalIPConfig
+}
+
+var map_ExternalIPPolicy = map[string]string{
+ "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
+ "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.",
+ "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
+}
+
+func (ExternalIPPolicy) SwaggerDoc() map[string]string {
+ return map_ExternalIPPolicy
+}
+
+var map_MTUMigration = map[string]string{
+ "": "MTUMigration contains infomation about MTU migration.",
+ "network": "Network contains MTU migration configuration for the default network.",
+ "machine": "Machine contains MTU migration configuration for the machine's uplink.",
+}
+
+func (MTUMigration) SwaggerDoc() map[string]string {
+ return map_MTUMigration
+}
+
+var map_MTUMigrationValues = map[string]string{
+ "": "MTUMigrationValues contains the values for a MTU migration.",
+ "to": "To is the MTU to migrate to.",
+ "from": "From is the MTU to migrate from.",
+}
+
+func (MTUMigrationValues) SwaggerDoc() map[string]string {
+ return map_MTUMigrationValues
+}
+
+var map_Network = map[string]string{
+ "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+ return map_Network
+}
+
+var map_NetworkDiagnostics = map[string]string{
+ "mode": "mode controls the network diagnostics mode\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is All.",
+ "sourcePlacement": "sourcePlacement controls the scheduling of network diagnostics source deployment\n\nSee NetworkDiagnosticsSourcePlacement for more details about default values.",
+ "targetPlacement": "targetPlacement controls the scheduling of network diagnostics target daemonset\n\nSee NetworkDiagnosticsTargetPlacement for more details about default values.",
+}
+
+func (NetworkDiagnostics) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnostics
+}
+
+var map_NetworkDiagnosticsSourcePlacement = map[string]string{
+ "": "NetworkDiagnosticsSourcePlacement defines node scheduling configuration network diagnostics source components",
+ "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+ "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is an empty list.",
+}
+
+func (NetworkDiagnosticsSourcePlacement) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnosticsSourcePlacement
+}
+
+var map_NetworkDiagnosticsTargetPlacement = map[string]string{
+ "": "NetworkDiagnosticsTargetPlacement defines node scheduling configuration network diagnostics target components",
+ "nodeSelector": "nodeSelector is the node selector applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `kubernetes.io/os: linux`.",
+ "tolerations": "tolerations is a list of tolerations applied to network diagnostics components\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is `- operator: \"Exists\"` which means that all taints are tolerated.",
+}
+
+func (NetworkDiagnosticsTargetPlacement) SwaggerDoc() map[string]string {
+ return map_NetworkDiagnosticsTargetPlacement
+}
+
+var map_NetworkList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+ return map_NetworkList
+}
+
+var map_NetworkMigration = map[string]string{
+ "": "NetworkMigration represents the cluster network configuration.",
+ "networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes",
+ "mtu": "MTU contains the MTU migration configuration.",
+}
+
+func (NetworkMigration) SwaggerDoc() map[string]string {
+ return map_NetworkMigration
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.",
+ "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.",
+ "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.",
+ "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.",
+ "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.",
+ "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+ "": "NetworkStatus is the current network configuration.",
+ "clusterNetwork": "IP address pool to use for pod IPs.",
+ "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
+ "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).",
+ "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.",
+ "migration": "Migration contains the cluster network migration configuration.",
+ "conditions": "conditions represents the observations of a network.config current state. Known .status.conditions.type are: \"NetworkTypeMigrationInProgress\", \"NetworkTypeMigrationMTUReady\", \"NetworkTypeMigrationTargetCNIAvailable\", \"NetworkTypeMigrationTargetCNIInUse\", \"NetworkTypeMigrationOriginalCNIPurged\" and \"NetworkDiagnosticsAvailable\"",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+ return map_NetworkStatus
+}
+
+var map_Node = map[string]string{
+ "": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values.",
+}
+
+func (Node) SwaggerDoc() map[string]string {
+ return map_Node
+}
+
+var map_NodeList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NodeList) SwaggerDoc() map[string]string {
+ return map_NodeList
+}
+
+var map_NodeSpec = map[string]string{
+ "cgroupMode": "CgroupMode determines the cgroups version on the node",
+ "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster",
+}
+
+func (NodeSpec) SwaggerDoc() map[string]string {
+ return map_NodeSpec
+}
+
+var map_BasicAuthIdentityProvider = map[string]string{
+ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials",
+}
+
+func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string {
+ return map_BasicAuthIdentityProvider
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "organizations": "organizations optionally restricts which organizations are allowed to log in",
+ "teams": "teams optionally restricts which teams are allowed to log in. Format is /.",
+ "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "url": "url is the oauth server base URL",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+}
+
+func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitLabIdentityProvider
+}
+
+var map_GoogleIdentityProvider = map[string]string{
+ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
+}
+
+func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GoogleIdentityProvider
+}
+
+var map_HTPasswdIdentityProvider = map[string]string{
+ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials",
+ "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string {
+ return map_HTPasswdIdentityProvider
+}
+
+var map_IdentityProvider = map[string]string{
+ "": "IdentityProvider provides identities for users authenticating using credentials",
+ "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName",
+ "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users Defaults to \"claim\"",
+}
+
+func (IdentityProvider) SwaggerDoc() map[string]string {
+ return map_IdentityProvider
+}
+
+var map_IdentityProviderConfig = map[string]string{
+ "": "IdentityProviderConfig contains configuration for using a specific identity provider",
+ "type": "type identifies the identity provider type for this entry.",
+ "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP",
+ "github": "github enables user authentication using GitHub credentials",
+ "gitlab": "gitlab enables user authentication using GitLab credentials",
+ "google": "google enables user authentication using Google credentials",
+ "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials",
+ "keystone": "keystone enables user authentication using keystone password credentials",
+ "ldap": "ldap enables user authentication using LDAP credentials",
+ "openID": "openID enables user authentication using OpenID credentials",
+ "requestHeader": "requestHeader enables user authentication using request header credentials",
+}
+
+func (IdentityProviderConfig) SwaggerDoc() map[string]string {
+ return map_IdentityProviderConfig
+}
+
+var map_KeystoneIdentityProvider = map[string]string{
+ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials",
+ "domainName": "domainName is required for keystone v3",
+}
+
+func (KeystoneIdentityProvider) SwaggerDoc() map[string]string {
+ return map_KeystoneIdentityProvider
+}
+
+var map_LDAPAttributeMapping = map[string]string{
+ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
+ "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is \"dn\"",
+ "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
+ "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
+ "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+}
+
+func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
+ return map_LDAPAttributeMapping
+}
+
+var map_LDAPIdentityProvider = map[string]string{
+ "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials",
+ "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter",
+ "bindDN": "bindDN is an optional DN to bind with during the search phase.",
+ "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "insecure": "insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLS connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "attributes": "attributes maps LDAP attributes to identities",
+}
+
+func (LDAPIdentityProvider) SwaggerDoc() map[string]string {
+ return map_LDAPIdentityProvider
+}
+
+var map_OAuth = map[string]string{
+ "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (OAuth) SwaggerDoc() map[string]string {
+ return map_OAuth
+}
+
+var map_OAuthList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OAuthList) SwaggerDoc() map[string]string {
+ return map_OAuthList
+}
+
+var map_OAuthRemoteConnectionInfo = map[string]string{
+ "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection",
+ "url": "url is the remote URL to connect to",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string {
+ return map_OAuthRemoteConnectionInfo
+}
+
+var map_OAuthSpec = map[string]string{
+ "": "OAuthSpec contains desired cluster auth configuration",
+ "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.",
+ "tokenConfig": "tokenConfig contains options for authorization and access tokens",
+ "templates": "templates allow you to customize pages like the login page.",
+}
+
+func (OAuthSpec) SwaggerDoc() map[string]string {
+ return map_OAuthSpec
+}
+
+var map_OAuthStatus = map[string]string{
+ "": "OAuthStatus shows current known state of OAuth server in the cluster",
+}
+
+func (OAuthStatus) SwaggerDoc() map[string]string {
+ return map_OAuthStatus
+}
+
+var map_OAuthTemplates = map[string]string{
+ "": "OAuthTemplates allow for customization of pages like the login page",
+ "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.",
+ "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.",
+ "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.",
+}
+
+func (OAuthTemplates) SwaggerDoc() map[string]string {
+ return map_OAuthTemplates
+}
+
+var map_OpenIDClaims = map[string]string{
+ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
+ "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim",
+ "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
+ "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+ "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.",
+}
+
+func (OpenIDClaims) SwaggerDoc() map[string]string {
+ return map_OpenIDClaims
+}
+
+var map_OpenIDIdentityProvider = map[string]string{
+ "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.",
+ "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.",
+ "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.",
+ "claims": "claims mappings",
+}
+
+func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
+ return map_OpenIDIdentityProvider
+}
+
+var map_RequestHeaderIdentityProvider = map[string]string{
+ "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials",
+ "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.",
+ "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.",
+ "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.",
+ "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
+ "headers": "headers is the set of headers to check for identity information",
+ "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username",
+ "nameHeaders": "nameHeaders is the set of headers to check for the display name",
+ "emailHeaders": "emailHeaders is the set of headers to check for the email address",
+}
+
+func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
+ return map_RequestHeaderIdentityProvider
+}
+
+var map_TokenConfig = map[string]string{
+ "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
+ "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
+ "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.",
+ "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value",
+}
+
+func (TokenConfig) SwaggerDoc() map[string]string {
+ return map_TokenConfig
+}
+
+var map_HubSource = map[string]string{
+ "": "HubSource is used to specify the hub source and its configuration",
+ "name": "name is the name of one of the default hub sources",
+ "disabled": "disabled is used to disable a default hub source on cluster",
+}
+
+func (HubSource) SwaggerDoc() map[string]string {
+ return map_HubSource
+}
+
+var map_HubSourceStatus = map[string]string{
+ "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source",
+ "status": "status indicates success or failure in applying the configuration",
+ "message": "message provides more information regarding failures",
+}
+
+func (HubSourceStatus) SwaggerDoc() map[string]string {
+ return map_HubSourceStatus
+}
+
+var map_OperatorHub = map[string]string{
+ "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OperatorHub) SwaggerDoc() map[string]string {
+ return map_OperatorHub
+}
+
+var map_OperatorHubList = map[string]string{
+ "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OperatorHubList) SwaggerDoc() map[string]string {
+ return map_OperatorHubList
+}
+
+var map_OperatorHubSpec = map[string]string{
+ "": "OperatorHubSpec defines the desired state of OperatorHub",
+ "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.",
+ "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.",
+}
+
+func (OperatorHubSpec) SwaggerDoc() map[string]string {
+ return map_OperatorHubSpec
+}
+
+var map_OperatorHubStatus = map[string]string{
+ "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.",
+ "sources": "sources encapsulates the result of applying the configuration for each hub source",
+}
+
+func (OperatorHubStatus) SwaggerDoc() map[string]string {
+ return map_OperatorHubStatus
+}
+
+var map_Project = map[string]string{
+ "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Project) SwaggerDoc() map[string]string {
+ return map_Project
+}
+
+var map_ProjectList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ProjectList) SwaggerDoc() map[string]string {
+ return map_ProjectList
+}
+
+var map_ProjectSpec = map[string]string{
+ "": "ProjectSpec holds the project creation configuration.",
+ "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
+ "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.",
+}
+
+func (ProjectSpec) SwaggerDoc() map[string]string {
+ return map_ProjectSpec
+}
+
+var map_TemplateReference = map[string]string{
+ "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.",
+ "name": "name is the metadata.name of the referenced project request template",
+}
+
+func (TemplateReference) SwaggerDoc() map[string]string {
+ return map_TemplateReference
+}
+
+var map_Proxy = map[string]string{
+ "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds user-settable values for the proxy configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Proxy) SwaggerDoc() map[string]string {
+ return map_Proxy
+}
+
+var map_ProxyList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ProxyList) SwaggerDoc() map[string]string {
+ return map_ProxyList
+}
+
+var map_ProxySpec = map[string]string{
+ "": "ProxySpec contains cluster proxy creation configuration.",
+ "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
+ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.",
+ "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.",
+ "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.",
+ "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |",
+}
+
+func (ProxySpec) SwaggerDoc() map[string]string {
+ return map_ProxySpec
+}
+
+var map_ProxyStatus = map[string]string{
+ "": "ProxyStatus shows current known state of the cluster proxy.",
+ "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.",
+ "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.",
+ "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.",
+}
+
+func (ProxyStatus) SwaggerDoc() map[string]string {
+ return map_ProxyStatus
+}
+
+var map_ProfileCustomizations = map[string]string{
+ "": "ProfileCustomizations contains various parameters for modifying the default behavior of certain profiles",
+ "dynamicResourceAllocation": "dynamicResourceAllocation allows to enable or disable dynamic resource allocation within the scheduler. Dynamic resource allocation is an API for requesting and sharing resources between pods and containers inside a pod. Third-party resource drivers are responsible for tracking and allocating resources. Different kinds of resources support arbitrary parameters for defining requirements and initialization. Valid values are Enabled, Disabled and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Disabled.",
+}
+
+func (ProfileCustomizations) SwaggerDoc() map[string]string {
+ return map_ProfileCustomizations
+}
+
+var map_Scheduler = map[string]string{
+ "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Scheduler) SwaggerDoc() map[string]string {
+ return map_Scheduler
+}
+
+var map_SchedulerList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (SchedulerList) SwaggerDoc() map[string]string {
+ return map_SchedulerList
+}
+
+var map_SchedulerSpec = map[string]string{
+ "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.",
+ "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"",
+ "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.",
+ "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.",
+ "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.",
+}
+
+func (SchedulerSpec) SwaggerDoc() map[string]string {
+ return map_SchedulerSpec
+}
+
+var map_FeatureGateTests = map[string]string{
+ "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.",
+ "tests": "Tests contains an item for every TestName",
+}
+
+func (FeatureGateTests) SwaggerDoc() map[string]string {
+ return map_FeatureGateTests
+}
+
+var map_TestDetails = map[string]string{
+ "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.",
+}
+
+func (TestDetails) SwaggerDoc() map[string]string {
+ return map_TestDetails
+}
+
+var map_TestReporting = map[string]string{
+ "": "TestReporting is used for origin (and potentially others) to report the test names for a given FeatureGate into the payload for later analysis on a per-payload basis. This doesn't need any CRD because it's never stored in the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (TestReporting) SwaggerDoc() map[string]string {
+ return map_TestReporting
+}
+
+var map_TestReportingSpec = map[string]string{
+ "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.",
+}
+
+func (TestReportingSpec) SwaggerDoc() map[string]string {
+ return map_TestReportingSpec
+}
+
+var map_CustomTLSProfile = map[string]string{
+ "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.",
+}
+
+func (CustomTLSProfile) SwaggerDoc() map[string]string {
+ return map_CustomTLSProfile
+}
+
+var map_IntermediateTLSProfile = map[string]string{
+ "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29",
+}
+
+func (IntermediateTLSProfile) SwaggerDoc() map[string]string {
+ return map_IntermediateTLSProfile
+}
+
+var map_ModernTLSProfile = map[string]string{
+ "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility",
+}
+
+func (ModernTLSProfile) SwaggerDoc() map[string]string {
+ return map_ModernTLSProfile
+}
+
+var map_OldTLSProfile = map[string]string{
+ "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility",
+}
+
+func (OldTLSProfile) SwaggerDoc() map[string]string {
+ return map_OldTLSProfile
+}
+
+var map_TLSProfileSpec = map[string]string{
+ "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.",
+ "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA",
+ "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: VersionTLS11\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12",
+}
+
+func (TLSProfileSpec) SwaggerDoc() map[string]string {
+ return map_TLSProfileSpec
+}
+
+var map_TLSSecurityProfile = map[string]string{
+ "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.",
+ "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.",
+ "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n - DHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-ECDSA-AES128-SHA256\n\n - ECDHE-RSA-AES128-SHA256\n\n - ECDHE-ECDSA-AES128-SHA\n\n - ECDHE-RSA-AES128-SHA\n\n - ECDHE-ECDSA-AES256-SHA384\n\n - ECDHE-RSA-AES256-SHA384\n\n - ECDHE-ECDSA-AES256-SHA\n\n - ECDHE-RSA-AES256-SHA\n\n - DHE-RSA-AES128-SHA256\n\n - DHE-RSA-AES256-SHA256\n\n - AES128-GCM-SHA256\n\n - AES256-GCM-SHA384\n\n - AES128-SHA256\n\n - AES256-SHA256\n\n - AES128-SHA\n\n - AES256-SHA\n\n - DES-CBC3-SHA\n\n minTLSVersion: VersionTLS10",
+ "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES256-GCM-SHA384\n\n - ECDHE-RSA-AES256-GCM-SHA384\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - DHE-RSA-AES128-GCM-SHA256\n\n - DHE-RSA-AES256-GCM-SHA384\n\n minTLSVersion: VersionTLS12",
+ "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n\n - TLS_AES_128_GCM_SHA256\n\n - TLS_AES_256_GCM_SHA384\n\n - TLS_CHACHA20_POLY1305_SHA256\n\n minTLSVersion: VersionTLS13",
+ "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n\n - ECDHE-ECDSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-CHACHA20-POLY1305\n\n - ECDHE-RSA-AES128-GCM-SHA256\n\n - ECDHE-ECDSA-AES128-GCM-SHA256\n\n minTLSVersion: VersionTLS11",
+}
+
+func (TLSSecurityProfile) SwaggerDoc() map[string]string {
+ return map_TLSSecurityProfile
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/Makefile b/vendor/github.com/openshift/api/config/v1alpha1/Makefile
new file mode 100644
index 0000000000..e32ad5d9e1
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1alpha1"
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/doc.go b/vendor/github.com/openshift/api/config/v1alpha1/doc.go
new file mode 100644
index 0000000000..20d4485739
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=config.openshift.io
+// Package v1alpha1 is the v1alpha1 version of the API.
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go
new file mode 100644
index 0000000000..36432ceb80
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go
@@ -0,0 +1,44 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &InsightsDataGather{},
+ &InsightsDataGatherList{},
+ &Backup{},
+ &BackupList{},
+ &ImagePolicy{},
+ &ImagePolicyList{},
+ &ClusterImagePolicy{},
+ &ClusterImagePolicyList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
new file mode 100644
index 0000000000..65eb5c1f75
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
@@ -0,0 +1,174 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// Backup provides configuration for performing backups of the openshift cluster.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=backups,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1482
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=AutomatedEtcdBackup
+// +openshift:compatibility-gen:level=4
+type Backup struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec BackupSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Status BackupStatus `json:"status"`
+}
+
+type BackupSpec struct {
+ // etcd specifies the configuration for periodic backups of the etcd cluster
+ // +kubebuilder:validation:Required
+ EtcdBackupSpec EtcdBackupSpec `json:"etcd"`
+}
+
+type BackupStatus struct {
+}
+
+// EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator
+type EtcdBackupSpec struct {
+
+ // Schedule defines the recurring backup schedule in Cron format
+ // every 2 hours: 0 */2 * * *
+ // every day at 3am: 0 3 * * *
+ // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
+ // The current default is "no backups", but will change in the future.
+ // +kubebuilder:validation:Optional
+ // +optional
+ // +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$`
+ Schedule string `json:"schedule"`
+
+ // Cron Regex breakdown:
+ // Allow macros: (@(annually|yearly|monthly|weekly|daily|hourly))
+ // OR
+ // Minute:
+ // (\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*))
+ // Hour:
+ // (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*))
+ // Day of the Month:
+ // (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*))
+ // Month:
+ // (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*))
+ // Day of Week:
+ // (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))
+ //
+
+ // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
+ // If not specified, this will default to the time zone of the kube-controller-manager process.
+ // See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
+ // +kubebuilder:validation:Optional
+ // +optional
+ // +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$`
+ TimeZone string `json:"timeZone"`
+
+ // Timezone regex breakdown:
+ // ([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(/[A-Za-z_]+){1,2}) - Matches either:
+ // [A-Za-z_]+([+-]*0)* - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by a +0 or -0 to account for GMT+0 or GMT-0 (for the first part of the timezone identifier).
+ // [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires
+ // (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset)
+
+ // RetentionPolicy defines the retention policy for retaining and deleting existing backups.
+ // +kubebuilder:validation:Optional
+ // +optional
+ RetentionPolicy RetentionPolicy `json:"retentionPolicy"`
+
+ // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
+ // etcd backup files would be saved
+ // The PVC itself must always be created in the "openshift-etcd" namespace
+ // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup.
+ // In the future this would be backups saved across the control-plane master nodes.
+ // +kubebuilder:validation:Optional
+ // +optional
+ PVCName string `json:"pvcName"`
+}
+
+// RetentionType is the enumeration of valid retention policy types
+// +enum
+// +kubebuilder:validation:Enum:="RetentionNumber";"RetentionSize"
+type RetentionType string
+
+const (
+ // RetentionTypeNumber sets the retention policy based on the number of backup files saved
+ RetentionTypeNumber RetentionType = "RetentionNumber"
+ // RetentionTypeSize sets the retention policy based on the total size of the backup files saved
+ RetentionTypeSize RetentionType = "RetentionSize"
+)
+
+// RetentionPolicy defines the retention policy for retaining and deleting existing backups.
+// This struct is a discriminated union that allows users to select the type of retention policy from the supported types.
+// +union
+type RetentionPolicy struct {
+ // RetentionType sets the type of retention policy.
+ // Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future.
+ // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
+ // The current default is RetentionNumber with 15 backups kept.
+ // +unionDiscriminator
+ // +required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize"
+ RetentionType RetentionType `json:"retentionType"`
+
+ // RetentionNumber configures the retention policy based on the number of backups
+ // +kubebuilder:validation:Optional
+ // +optional
+ RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"`
+
+ // RetentionSize configures the retention policy based on the size of backups
+ // +kubebuilder:validation:Optional
+ // +optional
+ RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"`
+}
+
+// RetentionNumberConfig specifies the configuration of the retention policy on the number of backups
+type RetentionNumberConfig struct {
+ // MaxNumberOfBackups defines the maximum number of backups to retain.
+ // If the existing number of backups saved is equal to MaxNumberOfBackups then
+ // the oldest backup will be removed before a new backup is initiated.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Required
+ // +required
+ MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"`
+}
+
+// RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups
+type RetentionSizeConfig struct {
+ // MaxSizeOfBackupsGb defines the total size in GB of backups to retain.
+ // If the current total size backups exceeds MaxSizeOfBackupsGb then
+ // the oldest backup will be removed before a new backup is initiated.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Required
+ // +required
+ MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupList is a collection of items
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type BackupList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []Backup `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go
new file mode 100644
index 0000000000..c503fdeab6
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go
@@ -0,0 +1,77 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicy holds cluster-wide configuration for image signature verification
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=ImagePolicy
+// +openshift:compatibility-gen:level=4
+type ClusterImagePolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec contains the configuration for the cluster image policy.
+ // +kubebuilder:validation:Required
+ Spec ClusterImagePolicySpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ClusterImagePolicyStatus `json:"status,omitempty"`
+}
+
+// ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.
+type ClusterImagePolicySpec struct {
+ // scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+ // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+ // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+ // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+ // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+ // Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images.
+ // If configured, the policies for OpenShift Container Platform repositories will not be in effect.
+ // For additional details about the format, please refer to the document explaining the docker transport field,
+ // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxItems=256
+ // +listType=set
+ Scopes []ImageScope `json:"scopes"`
+ // policy contains configuration to allow scopes to be verified, and defines how
+ // images not matching the verification policy will be treated.
+ // +kubebuilder:validation:Required
+ Policy Policy `json:"policy"`
+}
+
+// +k8s:deepcopy-gen=true
+type ClusterImagePolicyStatus struct {
+ // conditions provide details on the status of this API Resource.
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicyList is a list of ClusterImagePolicy resources
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type ClusterImagePolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ClusterImagePolicy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
new file mode 100644
index 0000000000..247bab2184
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
@@ -0,0 +1,236 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicy holds namespace-wide configuration for image signature verification
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagepolicies,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1457
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=ImagePolicy
+// +openshift:compatibility-gen:level=4
+type ImagePolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ Spec ImagePolicySpec `json:"spec"`
+ // status contains the observed state of the resource.
+ // +optional
+ Status ImagePolicyStatus `json:"status,omitempty"`
+}
+
+// ImagePolicySpec is the specification of the ImagePolicy CRD.
+type ImagePolicySpec struct {
+ // scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+ // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+ // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+ // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+ // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+ // Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images.
+ // If configured, the policies for OpenShift Container Platform repositories will not be in effect.
+ // For additional details about the format, please refer to the document explaining the docker transport field,
+ // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxItems=256
+ // +listType=set
+ Scopes []ImageScope `json:"scopes"`
+ // policy contains configuration to allow scopes to be verified, and defines how
+ // images not matching the verification policy will be treated.
+ // +kubebuilder:validation:Required
+ Policy Policy `json:"policy"`
+}
+
+// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'"
+// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching"
+// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope"
+// +kubebuilder:validation:MaxLength=512
+type ImageScope string
+
+// Policy defines the verification policy for the items in the scopes list.
+type Policy struct {
+ // rootOfTrust specifies the root of trust for the policy.
+ // +kubebuilder:validation:Required
+ RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"`
+ // signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact".
+ // +optional
+ SignedIdentity PolicyIdentity `json:"signedIdentity,omitempty"`
+}
+
+// PolicyRootOfTrust defines the root of trust based on the selected policyType.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise"
+type PolicyRootOfTrust struct {
+ // policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust.
+ // "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification.
+ // "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ PolicyType PolicyType `json:"policyType"`
+ // publicKey defines the root of trust based on a sigstore public key.
+ // +optional
+ PublicKey *PublicKey `json:"publicKey,omitempty"`
+ // fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.
+ // For more information about Fulcio and Rekor, please refer to the document at:
+ // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor
+ // +optional
+ FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=PublicKey;FulcioCAWithRekor
+type PolicyType string
+
+const (
+ PublicKeyRootOfTrust PolicyType = "PublicKey"
+ FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor"
+)
+
+// PublicKey defines the root of trust based on a sigstore public key.
+type PublicKey struct {
+ // keyData contains inline base64-encoded data for the PEM format public key.
+ // KeyData must be at most 8192 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=8192
+ KeyData []byte `json:"keyData"`
+ // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key.
+ // rekorKeyData must be at most 8192 characters.
+ // +optional
+ // +kubebuilder:validation:MaxLength=8192
+ RekorKeyData []byte `json:"rekorKeyData,omitempty"`
+}
+
+// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.
+type FulcioCAWithRekor struct {
+ // fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA.
+ // fulcioCAData must be at most 8192 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=8192
+ FulcioCAData []byte `json:"fulcioCAData"`
+ // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key.
+ // rekorKeyData must be at most 8192 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=8192
+ RekorKeyData []byte `json:"rekorKeyData"`
+ // fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.
+ // +kubebuilder:validation:Required
+ FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"`
+}
+
+// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.
+type PolicyFulcioSubject struct {
+ // oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token.
+ // Example: "https://expected.OIDC.issuer/"
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL"
+ OIDCIssuer string `json:"oidcIssuer"`
+ // signedEmail holds the email address that the Fulcio certificate is issued for.
+ // Example: "expected-signing-user@example.com"
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address"
+ SignedEmail string `json:"signedEmail"`
+}
+
+// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact".
+// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise"
+// +union
+type PolicyIdentity struct {
+ // matchPolicy sets the type of matching to be used.
+ // Valid values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact".
+ // If set matchPolicy to ExactRepository, then the exactRepository must be specified.
+ // If set matchPolicy to RemapIdentity, then the remapIdentity must be specified.
+ // "MatchRepoDigestOrExact" means that the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity.
+ // "MatchRepository" means that the identity in the signature must be in the same repository as the image identity.
+ // "ExactRepository" means that the identity in the signature must be in the same repository as a specific identity specified by "repository".
+ // "RemapIdentity" means that the signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified remapPrefix.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ MatchPolicy IdentityMatchPolicy `json:"matchPolicy"`
+ // exactRepository is required if matchPolicy is set to "ExactRepository".
+ // +optional
+ PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"`
+ // remapIdentity is required if matchPolicy is set to "RemapIdentity".
+ // +optional
+ PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"`
+}
+
+// +kubebuilder:validation:MaxLength=512
+// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest"
+// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity"
+type IdentityRepositoryPrefix string
+
+type PolicyMatchExactRepository struct {
+ // repository is the reference of the image identity to be matched.
+ // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox
+ // +kubebuilder:validation:Required
+ Repository IdentityRepositoryPrefix `json:"repository"`
+}
+
+type PolicyMatchRemapIdentity struct {
+ // prefix is the prefix of the image identity to be matched.
+ // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place).
+ // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure.
+ // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
+ // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+ // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+ // +kubebuilder:validation:Required
+ Prefix IdentityRepositoryPrefix `json:"prefix"`
+ // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
+ // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+ // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+ // +kubebuilder:validation:Required
+ SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"`
+}
+
+// IdentityMatchPolicy defines the type of matching for "matchPolicy".
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity
+type IdentityMatchPolicy string
+
+const (
+ IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact"
+ IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository"
+ IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository"
+ IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity"
+)
+
+// +k8s:deepcopy-gen=true
+type ImagePolicyStatus struct {
+ // conditions provide details on the status of this API Resource.
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicyList is a list of ImagePolicy resources
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type ImagePolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImagePolicy `json:"items"`
+}
+
+const (
+ // ImagePolicyPending indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.
+ ImagePolicyPending = "Pending"
+ // ImagePolicyApplied indicates that the policy has been applied
+ ImagePolicyApplied = "Applied"
+)
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
new file mode 100644
index 0000000000..171e96d5b8
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
@@ -0,0 +1,88 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// InsightsDataGather provides data gather configuration options for the Insights Operator.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1245
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=InsightsConfig
+// +openshift:compatibility-gen:level=4
+type InsightsDataGather struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ Spec InsightsDataGatherSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InsightsDataGatherStatus `json:"status"`
+}
+
+type InsightsDataGatherSpec struct {
+ // gatherConfig spec attribute includes all the configuration options related to
+ // gathering of the Insights data and its uploading to the ingress.
+ // +optional
+ GatherConfig GatherConfig `json:"gatherConfig,omitempty"`
+}
+
+type InsightsDataGatherStatus struct {
+}
+
+// gatherConfig provides data gathering configuration options.
+type GatherConfig struct {
+ // dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain
+ // in the Insights archive data. Valid values are "None" and "ObfuscateNetworking".
+ // When set to None the data is not obfuscated.
+ // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default is None.
+ // +optional
+ DataPolicy DataPolicy `json:"dataPolicy,omitempty"`
+ // disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing "all" value.
+ // If all the gatherers are disabled, the Insights operator does not gather any data.
+ // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md.
+ // Run the following command to get the names of last active gatherers:
+ // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'"
+ // An example of disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", "workloads/workload_info"]`
+ // +optional
+ DisabledGatherers []string `json:"disabledGatherers"`
+}
+
+const (
+ // No data obfuscation
+ NoPolicy DataPolicy = "None"
+ // IP addresses and cluster domain name are obfuscated
+ ObfuscateNetworking DataPolicy = "ObfuscateNetworking"
+)
+
+// dataPolicy declares valid data policy types
+// +kubebuilder:validation:Enum="";None;ObfuscateNetworking
+type DataPolicy string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InsightsDataGatherList is a collection of items
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type InsightsDataGatherList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []InsightsDataGather `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ab39b5b915
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,678 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Backup) DeepCopyInto(out *Backup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup.
+func (in *Backup) DeepCopy() *Backup {
+ if in == nil {
+ return nil
+ }
+ out := new(Backup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Backup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupList) DeepCopyInto(out *BackupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Backup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList.
+func (in *BackupList) DeepCopy() *BackupList {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
+ *out = *in
+ in.EtcdBackupSpec.DeepCopyInto(&out.EtcdBackupSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
+func (in *BackupSpec) DeepCopy() *BackupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus.
+func (in *BackupStatus) DeepCopy() *BackupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy.
+func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterImagePolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterImagePolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList.
+func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterImagePolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) {
+ *out = *in
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]ImageScope, len(*in))
+ copy(*out, *in)
+ }
+ in.Policy.DeepCopyInto(&out.Policy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec.
+func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterImagePolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus.
+func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterImagePolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
+ *out = *in
+ in.RetentionPolicy.DeepCopyInto(&out.RetentionPolicy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupSpec.
+func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdBackupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) {
+ *out = *in
+ if in.FulcioCAData != nil {
+ in, out := &in.FulcioCAData, &out.FulcioCAData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.RekorKeyData != nil {
+ in, out := &in.RekorKeyData, &out.RekorKeyData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ out.FulcioSubject = in.FulcioSubject
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor.
+func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor {
+ if in == nil {
+ return nil
+ }
+ out := new(FulcioCAWithRekor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GatherConfig) DeepCopyInto(out *GatherConfig) {
+ *out = *in
+ if in.DisabledGatherers != nil {
+ in, out := &in.DisabledGatherers, &out.DisabledGatherers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig.
+func (in *GatherConfig) DeepCopy() *GatherConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GatherConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy.
+func (in *ImagePolicy) DeepCopy() *ImagePolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImagePolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImagePolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList.
+func (in *ImagePolicyList) DeepCopy() *ImagePolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImagePolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) {
+ *out = *in
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]ImageScope, len(*in))
+ copy(*out, *in)
+ }
+ in.Policy.DeepCopyInto(&out.Policy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec.
+func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus.
+func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather.
+func (in *InsightsDataGather) DeepCopy() *InsightsDataGather {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsDataGather)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InsightsDataGather) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]InsightsDataGather, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList.
+func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsDataGatherList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) {
+ *out = *in
+ in.GatherConfig.DeepCopyInto(&out.GatherConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec.
+func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsDataGatherSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsDataGatherStatus) DeepCopyInto(out *InsightsDataGatherStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherStatus.
+func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsDataGatherStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ *out = *in
+ in.RootOfTrust.DeepCopyInto(&out.RootOfTrust)
+ in.SignedIdentity.DeepCopyInto(&out.SignedIdentity)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject.
+func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyFulcioSubject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) {
+ *out = *in
+ if in.PolicyMatchExactRepository != nil {
+ in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository
+ *out = new(PolicyMatchExactRepository)
+ **out = **in
+ }
+ if in.PolicyMatchRemapIdentity != nil {
+ in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity
+ *out = new(PolicyMatchRemapIdentity)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity.
+func (in *PolicyIdentity) DeepCopy() *PolicyIdentity {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyIdentity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository.
+func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyMatchExactRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity.
+func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyMatchRemapIdentity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) {
+ *out = *in
+ if in.PublicKey != nil {
+ in, out := &in.PublicKey, &out.PublicKey
+ *out = new(PublicKey)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FulcioCAWithRekor != nil {
+ in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor
+ *out = new(FulcioCAWithRekor)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust.
+func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyRootOfTrust)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PublicKey) DeepCopyInto(out *PublicKey) {
+ *out = *in
+ if in.KeyData != nil {
+ in, out := &in.KeyData, &out.KeyData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.RekorKeyData != nil {
+ in, out := &in.RekorKeyData, &out.RekorKeyData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey.
+func (in *PublicKey) DeepCopy() *PublicKey {
+ if in == nil {
+ return nil
+ }
+ out := new(PublicKey)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionNumberConfig.
+func (in *RetentionNumberConfig) DeepCopy() *RetentionNumberConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RetentionNumberConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionPolicy) DeepCopyInto(out *RetentionPolicy) {
+ *out = *in
+ if in.RetentionNumber != nil {
+ in, out := &in.RetentionNumber, &out.RetentionNumber
+ *out = new(RetentionNumberConfig)
+ **out = **in
+ }
+ if in.RetentionSize != nil {
+ in, out := &in.RetentionSize, &out.RetentionSize
+ *out = new(RetentionSizeConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicy.
+func (in *RetentionPolicy) DeepCopy() *RetentionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(RetentionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RetentionSizeConfig) DeepCopyInto(out *RetentionSizeConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionSizeConfig.
+func (in *RetentionSizeConfig) DeepCopy() *RetentionSizeConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RetentionSizeConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..9b5744d4a0
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,92 @@
+backups.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1482
+ CRDName: backups.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - AutomatedEtcdBackup
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: Backup
+ Labels: {}
+ PluralName: backups
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - AutomatedEtcdBackup
+ Version: v1alpha1
+
+clusterimagepolicies.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1457
+ CRDName: clusterimagepolicies.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ImagePolicy
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ClusterImagePolicy
+ Labels: {}
+ PluralName: clusterimagepolicies
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - ImagePolicy
+ Version: v1alpha1
+
+imagepolicies.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1457
+ CRDName: imagepolicies.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ImagePolicy
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: ImagePolicy
+ Labels: {}
+ PluralName: imagepolicies
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates:
+ - ImagePolicy
+ Version: v1alpha1
+
+insightsdatagathers.config.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1245
+ CRDName: insightsdatagathers.config.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - InsightsConfig
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: config.openshift.io
+ HasStatus: true
+ KindName: InsightsDataGather
+ Labels: {}
+ PluralName: insightsdatagathers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - InsightsConfig
+ Version: v1alpha1
+
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..efaac4fa2a
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,277 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Backup = map[string]string{
+ "": "\n\nBackup provides configuration for performing backups of the openshift cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Backup) SwaggerDoc() map[string]string {
+ return map_Backup
+}
+
+var map_BackupList = map[string]string{
+ "": "BackupList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (BackupList) SwaggerDoc() map[string]string {
+ return map_BackupList
+}
+
+var map_BackupSpec = map[string]string{
+ "etcd": "etcd specifies the configuration for periodic backups of the etcd cluster",
+}
+
+func (BackupSpec) SwaggerDoc() map[string]string {
+ return map_BackupSpec
+}
+
+var map_EtcdBackupSpec = map[string]string{
+ "": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator",
+ "schedule": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.",
+ "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones",
+ "retentionPolicy": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.",
+ "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.",
+}
+
+func (EtcdBackupSpec) SwaggerDoc() map[string]string {
+ return map_EtcdBackupSpec
+}
+
+var map_RetentionNumberConfig = map[string]string{
+ "": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups",
+ "maxNumberOfBackups": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.",
+}
+
+func (RetentionNumberConfig) SwaggerDoc() map[string]string {
+ return map_RetentionNumberConfig
+}
+
+var map_RetentionPolicy = map[string]string{
+ "": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.",
+ "retentionType": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.",
+ "retentionNumber": "RetentionNumber configures the retention policy based on the number of backups",
+ "retentionSize": "RetentionSize configures the retention policy based on the size of backups",
+}
+
+func (RetentionPolicy) SwaggerDoc() map[string]string {
+ return map_RetentionPolicy
+}
+
+var map_RetentionSizeConfig = map[string]string{
+ "": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups",
+ "maxSizeOfBackupsGb": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.",
+}
+
+func (RetentionSizeConfig) SwaggerDoc() map[string]string {
+ return map_RetentionSizeConfig
+}
+
+var map_ClusterImagePolicy = map[string]string{
+ "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec contains the configuration for the cluster image policy.",
+ "status": "status contains the observed state of the resource.",
+}
+
+func (ClusterImagePolicy) SwaggerDoc() map[string]string {
+ return map_ClusterImagePolicy
+}
+
+var map_ClusterImagePolicyList = map[string]string{
+ "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterImagePolicyList) SwaggerDoc() map[string]string {
+ return map_ClusterImagePolicyList
+}
+
+var map_ClusterImagePolicySpec = map[string]string{
+ "": "CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.",
+ "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. If configured, the policies for OpenShift Container Platform repositories will not be in effect. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
+ "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.",
+}
+
+func (ClusterImagePolicySpec) SwaggerDoc() map[string]string {
+ return map_ClusterImagePolicySpec
+}
+
+var map_ClusterImagePolicyStatus = map[string]string{
+ "conditions": "conditions provide details on the status of this API Resource.",
+}
+
+func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string {
+ return map_ClusterImagePolicyStatus
+}
+
+var map_FulcioCAWithRekor = map[string]string{
+ "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.",
+ "fulcioCAData": "fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters.",
+ "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters.",
+ "fulcioSubject": "fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.",
+}
+
+func (FulcioCAWithRekor) SwaggerDoc() map[string]string {
+ return map_FulcioCAWithRekor
+}
+
+var map_ImagePolicy = map[string]string{
+ "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status contains the observed state of the resource.",
+}
+
+func (ImagePolicy) SwaggerDoc() map[string]string {
+ return map_ImagePolicy
+}
+
+var map_ImagePolicyList = map[string]string{
+ "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImagePolicyList) SwaggerDoc() map[string]string {
+ return map_ImagePolicyList
+}
+
+var map_ImagePolicySpec = map[string]string{
+ "": "ImagePolicySpec is the specification of the ImagePolicy CRD.",
+ "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. Please be aware that the scopes should not be nested under the repositories of OpenShift Container Platform images. If configured, the policies for OpenShift Container Platform repositories will not be in effect. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
+ "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.",
+}
+
+func (ImagePolicySpec) SwaggerDoc() map[string]string {
+ return map_ImagePolicySpec
+}
+
+var map_ImagePolicyStatus = map[string]string{
+ "conditions": "conditions provide details on the status of this API Resource.",
+}
+
+func (ImagePolicyStatus) SwaggerDoc() map[string]string {
+ return map_ImagePolicyStatus
+}
+
+var map_Policy = map[string]string{
+ "": "Policy defines the verification policy for the items in the scopes list.",
+ "rootOfTrust": "rootOfTrust specifies the root of trust for the policy.",
+ "signedIdentity": "signedIdentity specifies what image identity the signature claims about the image. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".",
+}
+
+func (Policy) SwaggerDoc() map[string]string {
+ return map_Policy
+}
+
+var map_PolicyFulcioSubject = map[string]string{
+ "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.",
+ "oidcIssuer": "oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"",
+ "signedEmail": "signedEmail holds the email address the the Fulcio certificate is issued for. Example: \"expected-signing-user@example.com\"",
+}
+
+func (PolicyFulcioSubject) SwaggerDoc() map[string]string {
+ return map_PolicyFulcioSubject
+}
+
+var map_PolicyIdentity = map[string]string{
+ "": "PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".",
+ "matchPolicy": "matchPolicy sets the type of matching to be used. Valid values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". If set matchPolicy to ExactRepository, then the exactRepository must be specified. If set matchPolicy to RemapIdentity, then the remapIdentity must be specified. \"MatchRepoDigestOrExact\" means that the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. \"MatchRepository\" means that the identity in the signature must be in the same repository as the image identity. \"ExactRepository\" means that the identity in the signature must be in the same repository as a specific identity specified by \"repository\". \"RemapIdentity\" means that the signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix.",
+ "exactRepository": "exactRepository is required if matchPolicy is set to \"ExactRepository\".",
+ "remapIdentity": "remapIdentity is required if matchPolicy is set to \"RemapIdentity\".",
+}
+
+func (PolicyIdentity) SwaggerDoc() map[string]string {
+ return map_PolicyIdentity
+}
+
+var map_PolicyMatchExactRepository = map[string]string{
+ "repository": "repository is the reference of the image identity to be matched. The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox",
+}
+
+func (PolicyMatchExactRepository) SwaggerDoc() map[string]string {
+ return map_PolicyMatchExactRepository
+}
+
+var map_PolicyMatchRemapIdentity = map[string]string{
+ "prefix": "prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.",
+ "signedPrefix": "signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.",
+}
+
+func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string {
+ return map_PolicyMatchRemapIdentity
+}
+
+var map_PolicyRootOfTrust = map[string]string{
+ "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.",
+ "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.",
+ "publicKey": "publicKey defines the root of trust based on a sigstore public key.",
+ "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor",
+}
+
+func (PolicyRootOfTrust) SwaggerDoc() map[string]string {
+ return map_PolicyRootOfTrust
+}
+
+var map_PublicKey = map[string]string{
+ "": "PublicKey defines the root of trust based on a sigstore public key.",
+ "keyData": "keyData contains inline base64-encoded data for the PEM format public key. KeyData must be at most 8192 characters.",
+ "rekorKeyData": "rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters.",
+}
+
+func (PublicKey) SwaggerDoc() map[string]string {
+ return map_PublicKey
+}
+
+var map_GatherConfig = map[string]string{
+ "": "gatherConfig provides data gathering configuration options.",
+ "dataPolicy": "dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None.",
+ "disabledGatherers": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing \"all\" value. If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`",
+}
+
+func (GatherConfig) SwaggerDoc() map[string]string {
+ return map_GatherConfig
+}
+
+var map_InsightsDataGather = map[string]string{
+ "": "\n\nInsightsDataGather provides data gather configuration options for the the Insights Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (InsightsDataGather) SwaggerDoc() map[string]string {
+ return map_InsightsDataGather
+}
+
+var map_InsightsDataGatherList = map[string]string{
+ "": "InsightsDataGatherList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (InsightsDataGatherList) SwaggerDoc() map[string]string {
+ return map_InsightsDataGatherList
+}
+
+var map_InsightsDataGatherSpec = map[string]string{
+ "gatherConfig": "gatherConfig spec attribute includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.",
+}
+
+func (InsightsDataGatherSpec) SwaggerDoc() map[string]string {
+ return map_InsightsDataGatherSpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/console/.codegen.yaml b/vendor/github.com/openshift/api/console/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/console/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/console/OWNERS b/vendor/github.com/openshift/api/console/OWNERS
new file mode 100644
index 0000000000..d392780701
--- /dev/null
+++ b/vendor/github.com/openshift/api/console/OWNERS
@@ -0,0 +1,3 @@
+reviewers:
+ - jhadvig
+ - spadgett
diff --git a/vendor/github.com/openshift/api/console/install.go b/vendor/github.com/openshift/api/console/install.go
new file mode 100644
index 0000000000..147d023b7b
--- /dev/null
+++ b/vendor/github.com/openshift/api/console/install.go
@@ -0,0 +1,26 @@
+package console
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ consolev1 "github.com/openshift/api/console/v1"
+)
+
+const (
+ GroupName = "console.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(consolev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md
new file mode 100644
index 0000000000..9d3264071b
--- /dev/null
+++ b/vendor/github.com/openshift/api/features.md
@@ -0,0 +1,74 @@
+| FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA |
+| ------ | --- | --- | --- | --- | --- | --- |
+| ClusterAPIInstall| | | | | | |
+| ClusterAPIInstallAzure| | | | | | |
+| ClusterAPIInstallIBMCloud| | | | | | |
+| EventedPLEG| | | | | | |
+| MachineAPIOperatorDisableMachineHealthCheckController| | | | | | |
+| GatewayAPI| | | Enabled | Enabled | | |
+| AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled |
+| CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled |
+| ChunkSizeMiB| | | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallGCP| | | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallPowerVS| | | Enabled | Enabled | Enabled | Enabled |
+| DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled |
+| DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled |
+| EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled |
+| Example| | | Enabled | Enabled | Enabled | Enabled |
+| ExternalRouteCertificate| | | Enabled | Enabled | Enabled | Enabled |
+| GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled |
+| GCPLabelsTags| | | Enabled | Enabled | Enabled | Enabled |
+| ImagePolicy| | | Enabled | Enabled | Enabled | Enabled |
+| InsightsConfig| | | Enabled | Enabled | Enabled | Enabled |
+| InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled |
+| InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled |
+| InstallAlternateInfrastructureAWS| | | Enabled | Enabled | Enabled | Enabled |
+| MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled |
+| MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled |
+| ManagedBootImages| | | Enabled | Enabled | Enabled | Enabled |
+| MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled |
+| MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled |
+| MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled |
+| NewOLM| | | Enabled | Enabled | Enabled | Enabled |
+| NodeDisruptionPolicy| | | Enabled | Enabled | Enabled | Enabled |
+| NodeSwap| | | Enabled | Enabled | Enabled | Enabled |
+| OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled |
+| PinnedImages| | | Enabled | Enabled | Enabled | Enabled |
+| PlatformOperators| | | Enabled | Enabled | Enabled | Enabled |
+| RouteExternalCertificate| | | Enabled | Enabled | Enabled | Enabled |
+| ServiceAccountTokenNodeBinding| | | Enabled | Enabled | Enabled | Enabled |
+| ServiceAccountTokenNodeBindingValidation| | | Enabled | Enabled | Enabled | Enabled |
+| ServiceAccountTokenPodNodeInfo| | | Enabled | Enabled | Enabled | Enabled |
+| SignatureStores| | | Enabled | Enabled | Enabled | Enabled |
+| SigstoreImageVerification| | | Enabled | Enabled | Enabled | Enabled |
+| TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled |
+| UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled |
+| VSphereDriverConfiguration| | | Enabled | Enabled | Enabled | Enabled |
+| VSphereMultiVCenters| | | Enabled | Enabled | Enabled | Enabled |
+| VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled |
+| ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled |
+| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| BareMetalLoadBalancer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| CloudDualStackNodeIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallNutanix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallOpenStack| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ClusterAPIInstallVSphere| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| DisableKubeletCloudCredentialProviders| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ExternalCloudProvider| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ExternalCloudProviderAzure| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ExternalCloudProviderExternal| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ExternalCloudProviderGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| HardwareSpeed| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| MetricsServer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
diff --git a/vendor/github.com/openshift/api/helm/.codegen.yaml b/vendor/github.com/openshift/api/helm/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/helm/install.go b/vendor/github.com/openshift/api/helm/install.go
new file mode 100644
index 0000000000..6c8f51892a
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/install.go
@@ -0,0 +1,26 @@
+package helm
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ helmv1beta1 "github.com/openshift/api/helm/v1beta1"
+)
+
+const (
+ GroupName = "helm.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(helmv1beta1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/Makefile b/vendor/github.com/openshift/api/helm/v1beta1/Makefile
new file mode 100644
index 0000000000..d615908332
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="helm.openshift.io/v1beta1"
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/doc.go b/vendor/github.com/openshift/api/helm/v1beta1/doc.go
new file mode 100644
index 0000000000..8a45cd1c81
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=helm.openshift.io
+// Package v1beta1 is the v1beta1 version of the API.
+package v1beta1
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/register.go b/vendor/github.com/openshift/api/helm/v1beta1/register.go
new file mode 100644
index 0000000000..1301eb008e
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/register.go
@@ -0,0 +1,40 @@
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "helm.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &HelmChartRepository{},
+ &HelmChartRepositoryList{},
+ &ProjectHelmChartRepository{},
+ &ProjectHelmChartRepositoryList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go
new file mode 100644
index 0000000000..91f25fb673
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go
@@ -0,0 +1,104 @@
+package v1beta1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:plural=helmchartrepositories
+
+// HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository
+//
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=helmchartrepositories,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/598
+// +openshift:file-pattern=operatorOrdering=00
+type HelmChartRepository struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec HelmChartRepositorySpec `json:"spec"`
+
+ // Observed status of the repository within the cluster.
+ // +optional
+ Status HelmChartRepositoryStatus `json:"status"`
+}
+
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=2
+type HelmChartRepositoryList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []HelmChartRepository `json:"items"`
+}
+
+// Helm chart repository exposed within the cluster
+type HelmChartRepositorySpec struct {
+
+ // If set to true, disable the repo usage in the cluster/namespace
+ // +optional
+ Disabled bool `json:"disabled,omitempty"`
+
+ // Optional associated human readable repository name, it can be used by UI for displaying purposes
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=100
+ // +optional
+ DisplayName string `json:"name,omitempty"`
+
+ // Optional human readable repository description, it can be used by UI for displaying purposes
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ // +optional
+ Description string `json:"description,omitempty"`
+
+ // Required configuration for connecting to the chart repo
+ ConnectionConfig ConnectionConfig `json:"connectionConfig"`
+}
+
+type ConnectionConfig struct {
+
+ // Chart repository URL
+ // +kubebuilder:validation:Pattern=`^https?:\/\/`
+ // +kubebuilder:validation:MaxLength=2048
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca-bundle.crt" is used to locate the data.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA configv1.ConfigMapNameReference `json:"ca,omitempty"`
+
+ // tlsClientConfig is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS client certificate and private key to present when connecting to the server.
+ // The key "tls.crt" is used to locate the client certificate.
+ // The key "tls.key" is used to locate the private key.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientConfig configv1.SecretNameReference `json:"tlsClientConfig,omitempty"`
+}
+
+type HelmChartRepositoryStatus struct {
+
+ // conditions is a list of conditions and their statuses
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go
new file mode 100644
index 0000000000..37ff581c14
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go
@@ -0,0 +1,104 @@
+package v1beta1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:plural=projecthelmchartrepositories
+
+// ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository
+//
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=projecthelmchartrepositories,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1084
+// +openshift:file-pattern=operatorOrdering=00
+type ProjectHelmChartRepository struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProjectHelmChartRepositorySpec `json:"spec"`
+
+ // Observed status of the repository within the namespace.
+ // +optional
+ Status HelmChartRepositoryStatus `json:"status"`
+}
+
+// Project Helm chart repository exposed within a namespace
+type ProjectHelmChartRepositorySpec struct {
+
+ // If set to true, disable the repo usage in the namespace
+ // +optional
+ Disabled bool `json:"disabled,omitempty"`
+
+ // Optional associated human readable repository name, it can be used by UI for displaying purposes
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=100
+ // +optional
+ DisplayName string `json:"name,omitempty"`
+
+ // Optional human readable repository description, it can be used by UI for displaying purposes
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ // +optional
+ Description string `json:"description,omitempty"`
+
+ // Required configuration for connecting to the chart repo
+ ProjectConnectionConfig ConnectionConfigNamespaceScoped `json:"connectionConfig"`
+}
+
+type ConnectionConfigNamespaceScoped struct {
+
+ // Chart repository URL
+ // +kubebuilder:validation:Pattern=`^https?:\/\/`
+ // +kubebuilder:validation:MaxLength=2048
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca-bundle.crt" is used to locate the data.
+ // If empty, the default system roots are used.
+ // The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated.
+ // +optional
+ CA configv1.ConfigMapNameReference `json:"ca,omitempty"`
+
+ // tlsClientConfig is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS client certificate and private key to present when connecting to the server.
+ // The key "tls.crt" is used to locate the client certificate.
+ // The key "tls.key" is used to locate the private key.
+ // The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.
+ // +optional
+ TLSClientConfig configv1.SecretNameReference `json:"tlsClientConfig,omitempty"`
+
+ // basicAuthConfig is an optional reference to a secret by name that contains
+ // the basic authentication credentials to present when connecting to the server.
+ // The key "username" is used to locate the username.
+ // The key "password" is used to locate the password.
+ // The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.
+ // +optional
+ BasicAuthConfig configv1.SecretNameReference `json:"basicAuthConfig,omitempty"`
+}
+
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=2
+type ProjectHelmChartRepositoryList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ProjectHelmChartRepository `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..da33cc3efb
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,227 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionConfig) DeepCopyInto(out *ConnectionConfig) {
+ *out = *in
+ out.CA = in.CA
+ out.TLSClientConfig = in.TLSClientConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfig.
+func (in *ConnectionConfig) DeepCopy() *ConnectionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionConfigNamespaceScoped) DeepCopyInto(out *ConnectionConfigNamespaceScoped) {
+ *out = *in
+ out.CA = in.CA
+ out.TLSClientConfig = in.TLSClientConfig
+ out.BasicAuthConfig = in.BasicAuthConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfigNamespaceScoped.
+func (in *ConnectionConfigNamespaceScoped) DeepCopy() *ConnectionConfigNamespaceScoped {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionConfigNamespaceScoped)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartRepository) DeepCopyInto(out *HelmChartRepository) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepository.
+func (in *HelmChartRepository) DeepCopy() *HelmChartRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmChartRepository) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartRepositoryList) DeepCopyInto(out *HelmChartRepositoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HelmChartRepository, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositoryList.
+func (in *HelmChartRepositoryList) DeepCopy() *HelmChartRepositoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartRepositoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmChartRepositoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartRepositorySpec) DeepCopyInto(out *HelmChartRepositorySpec) {
+ *out = *in
+ out.ConnectionConfig = in.ConnectionConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositorySpec.
+func (in *HelmChartRepositorySpec) DeepCopy() *HelmChartRepositorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartRepositorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartRepositoryStatus) DeepCopyInto(out *HelmChartRepositoryStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositoryStatus.
+func (in *HelmChartRepositoryStatus) DeepCopy() *HelmChartRepositoryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartRepositoryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectHelmChartRepository) DeepCopyInto(out *ProjectHelmChartRepository) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepository.
+func (in *ProjectHelmChartRepository) DeepCopy() *ProjectHelmChartRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectHelmChartRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectHelmChartRepository) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectHelmChartRepositoryList) DeepCopyInto(out *ProjectHelmChartRepositoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ProjectHelmChartRepository, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepositoryList.
+func (in *ProjectHelmChartRepositoryList) DeepCopy() *ProjectHelmChartRepositoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectHelmChartRepositoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectHelmChartRepositoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectHelmChartRepositorySpec) DeepCopyInto(out *ProjectHelmChartRepositorySpec) {
+ *out = *in
+ out.ProjectConnectionConfig = in.ProjectConnectionConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepositorySpec.
+func (in *ProjectHelmChartRepositorySpec) DeepCopy() *ProjectHelmChartRepositorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectHelmChartRepositorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..218c072c16
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,42 @@
+helmchartrepositories.helm.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/598
+ CRDName: helmchartrepositories.helm.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: ""
+ GroupName: helm.openshift.io
+ HasStatus: true
+ KindName: HelmChartRepository
+ Labels: {}
+ PluralName: helmchartrepositories
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1beta1
+
+projecthelmchartrepositories.helm.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1084
+ CRDName: projecthelmchartrepositories.helm.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: ""
+ GroupName: helm.openshift.io
+ HasStatus: true
+ KindName: ProjectHelmChartRepository
+ Labels: {}
+ PluralName: projecthelmchartrepositories
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1beta1
+
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..42d986f23e
--- /dev/null
+++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,107 @@
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ConnectionConfig = map[string]string{
+ "url": "Chart repository URL",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca-bundle.crt\" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config.",
+ "tlsClientConfig": "tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key \"tls.crt\" is used to locate the client certificate. The key \"tls.key\" is used to locate the private key. The namespace for this secret is openshift-config.",
+}
+
+func (ConnectionConfig) SwaggerDoc() map[string]string {
+ return map_ConnectionConfig
+}
+
+var map_HelmChartRepository = map[string]string{
+ "": "HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "Observed status of the repository within the cluster..",
+}
+
+func (HelmChartRepository) SwaggerDoc() map[string]string {
+ return map_HelmChartRepository
+}
+
+var map_HelmChartRepositoryList = map[string]string{
+ "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (HelmChartRepositoryList) SwaggerDoc() map[string]string {
+ return map_HelmChartRepositoryList
+}
+
+var map_HelmChartRepositorySpec = map[string]string{
+ "": "Helm chart repository exposed within the cluster",
+ "disabled": "If set to true, disable the repo usage in the cluster/namespace",
+ "name": "Optional associated human readable repository name, it can be used by UI for displaying purposes",
+ "description": "Optional human readable repository description, it can be used by UI for displaying purposes",
+ "connectionConfig": "Required configuration for connecting to the chart repo",
+}
+
+func (HelmChartRepositorySpec) SwaggerDoc() map[string]string {
+ return map_HelmChartRepositorySpec
+}
+
+var map_HelmChartRepositoryStatus = map[string]string{
+ "conditions": "conditions is a list of conditions and their statuses",
+}
+
+func (HelmChartRepositoryStatus) SwaggerDoc() map[string]string {
+ return map_HelmChartRepositoryStatus
+}
+
+var map_ConnectionConfigNamespaceScoped = map[string]string{
+ "url": "Chart repository URL",
+ "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca-bundle.crt\" is used to locate the data. If empty, the default system roots are used. The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated.",
+ "tlsClientConfig": "tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key \"tls.crt\" is used to locate the client certificate. The key \"tls.key\" is used to locate the private key. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.",
+ "basicAuthConfig": "basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key \"username\" is used locate the username. The key \"password\" is used to locate the password. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.",
+}
+
+func (ConnectionConfigNamespaceScoped) SwaggerDoc() map[string]string {
+ return map_ConnectionConfigNamespaceScoped
+}
+
+var map_ProjectHelmChartRepository = map[string]string{
+ "": "ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "Observed status of the repository within the namespace..",
+}
+
+func (ProjectHelmChartRepository) SwaggerDoc() map[string]string {
+ return map_ProjectHelmChartRepository
+}
+
+var map_ProjectHelmChartRepositoryList = map[string]string{
+ "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ProjectHelmChartRepositoryList) SwaggerDoc() map[string]string {
+ return map_ProjectHelmChartRepositoryList
+}
+
+var map_ProjectHelmChartRepositorySpec = map[string]string{
+ "": "Project Helm chart repository exposed within a namespace",
+ "disabled": "If set to true, disable the repo usage in the namespace",
+ "name": "Optional associated human readable repository name, it can be used by UI for displaying purposes",
+ "description": "Optional human readable repository description, it can be used by UI for displaying purposes",
+ "connectionConfig": "Required configuration for connecting to the chart repo",
+}
+
+func (ProjectHelmChartRepositorySpec) SwaggerDoc() map[string]string {
+ return map_ProjectHelmChartRepositorySpec
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/image/.codegen.yaml b/vendor/github.com/openshift/api/image/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/image/OWNERS b/vendor/github.com/openshift/api/image/OWNERS
new file mode 100644
index 0000000000..c12602811e
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+ - bparees
+ - dmage
+ - legionus
+ - miminar
diff --git a/vendor/github.com/openshift/api/image/docker10/doc.go b/vendor/github.com/openshift/api/image/docker10/doc.go
new file mode 100644
index 0000000000..cc194d24db
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/docker10/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package docker10 is the docker10 version of the API.
+package docker10
diff --git a/vendor/github.com/openshift/api/image/docker10/register.go b/vendor/github.com/openshift/api/image/docker10/register.go
new file mode 100644
index 0000000000..3d5ad268ae
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/docker10/register.go
@@ -0,0 +1,47 @@
+package docker10
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ GroupName = "image.openshift.io"
+ LegacyGroupName = ""
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var (
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"}
+ LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"}
+
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
+
+ AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme
+
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &DockerImage{},
+ )
+ return nil
+}
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(LegacySchemeGroupVersion,
+ &DockerImage{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/image/docker10/types_docker.go b/vendor/github.com/openshift/api/image/docker10/types_docker.go
new file mode 100644
index 0000000000..03f0f67fcc
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/docker10/types_docker.go
@@ -0,0 +1,60 @@
+package docker10
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DockerImage is the type representing a container image and its various properties when
+// retrieved from the Docker client API.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type DockerImage struct {
+ metav1.TypeMeta `json:",inline"`
+
+ ID string `json:"Id"`
+ Parent string `json:"Parent,omitempty"`
+ Comment string `json:"Comment,omitempty"`
+ Created metav1.Time `json:"Created,omitempty"`
+ Container string `json:"Container,omitempty"`
+ ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"`
+ DockerVersion string `json:"DockerVersion,omitempty"`
+ Author string `json:"Author,omitempty"`
+ Config *DockerConfig `json:"Config,omitempty"`
+ Architecture string `json:"Architecture,omitempty"`
+ Size int64 `json:"Size,omitempty"`
+}
+
+// DockerConfig is the list of configuration options used when creating a container.
+type DockerConfig struct {
+ Hostname string `json:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty"`
+ User string `json:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty"`
+ ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+ Tty bool `json:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty"`
+ Cmd []string `json:"Cmd,omitempty"`
+ DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty"`
+ Entrypoint []string `json:"Entrypoint,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..2ce8330b2c
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
@@ -0,0 +1,114 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package docker10
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
+ *out = *in
+ if in.PortSpecs != nil {
+ in, out := &in.PortSpecs, &out.PortSpecs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExposedPorts != nil {
+ in, out := &in.ExposedPorts, &out.ExposedPorts
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Cmd != nil {
+ in, out := &in.Cmd, &out.Cmd
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Entrypoint != nil {
+ in, out := &in.Entrypoint, &out.Entrypoint
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityOpts != nil {
+ in, out := &in.SecurityOpts, &out.SecurityOpts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OnBuild != nil {
+ in, out := &in.OnBuild, &out.OnBuild
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
+func (in *DockerConfig) DeepCopy() *DockerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerImage) DeepCopyInto(out *DockerImage) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Created.DeepCopyInto(&out.Created)
+ in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(DockerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage.
+func (in *DockerImage) DeepCopy() *DockerImage {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerImage) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..e818f784ab
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,30 @@
+package docker10
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DockerConfig = map[string]string{
+ "": "DockerConfig is the list of configuration options used when creating a container.",
+}
+
+func (DockerConfig) SwaggerDoc() map[string]string {
+ return map_DockerConfig
+}
+
+var map_DockerImage = map[string]string{
+ "": "DockerImage is the type representing a container image and its various properties when retrieved from the Docker client API.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (DockerImage) SwaggerDoc() map[string]string {
+ return map_DockerImage
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go
new file mode 100644
index 0000000000..ddeb4403c4
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go
@@ -0,0 +1,18 @@
+package dockerpre012
+
+// DeepCopyInto is manually built to copy the (probably bugged) time.Time
+func (in *ImagePre012) DeepCopyInto(out *ImagePre012) {
+ *out = *in
+ out.Created = in.Created
+ in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ if *in == nil {
+ *out = nil
+ } else {
+ *out = new(Config)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/vendor/github.com/openshift/api/image/dockerpre012/doc.go
new file mode 100644
index 0000000000..e4a56260f1
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package dockerpre012 is the dockerpre012 version of the API.
+package dockerpre012
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/register.go b/vendor/github.com/openshift/api/image/dockerpre012/register.go
new file mode 100644
index 0000000000..7ce2adb0ad
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/register.go
@@ -0,0 +1,46 @@
+package dockerpre012
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ GroupName = "image.openshift.io"
+ LegacyGroupName = ""
+)
+
+var (
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"}
+ LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"}
+
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+
+ LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
+ AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme
+
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &DockerImage{},
+ )
+ return nil
+}
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(LegacySchemeGroupVersion,
+ &DockerImage{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go
new file mode 100644
index 0000000000..1111892a97
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go
@@ -0,0 +1,140 @@
+package dockerpre012
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the
+// version of metadata that the container image registry uses to persist metadata.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type DockerImage struct {
+ metav1.TypeMeta `json:",inline"`
+
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created metav1.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig DockerConfig `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *DockerConfig `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+// DockerConfig is the list of configuration options used when creating a container.
+type DockerConfig struct {
+ Hostname string `json:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty"`
+ User string `json:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty"`
+ ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+ Tty bool `json:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty"`
+ Cmd []string `json:"Cmd,omitempty"`
+ DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty"`
+ Entrypoint []string `json:"Entrypoint,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty"`
+ // This field is not supported in pre012 and will always be empty.
+ Labels map[string]string `json:"Labels,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific)
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type ImagePre012 struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. Those are contained in HostConfig
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
+ PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
+ Cmd []string `json:"Cmd" yaml:"Cmd"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It has been added in the version 1.20 of the Docker API, available since
+// Docker 1.8.
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type Mount struct {
+ Name string
+ Source string
+ Destination string
+ Driver string
+ Mode string
+ RW bool
+}
+
+// Port represents the port number and the protocol, in the form
+// /. For example: 80/tcp.
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type Port string
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..0e8ecb20d5
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
@@ -0,0 +1,217 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package dockerpre012
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ if in.PortSpecs != nil {
+ in, out := &in.PortSpecs, &out.PortSpecs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExposedPorts != nil {
+ in, out := &in.ExposedPorts, &out.ExposedPorts
+ *out = make(map[Port]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Cmd != nil {
+ in, out := &in.Cmd, &out.Cmd
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Entrypoint != nil {
+ in, out := &in.Entrypoint, &out.Entrypoint
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityOpts != nil {
+ in, out := &in.SecurityOpts, &out.SecurityOpts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OnBuild != nil {
+ in, out := &in.OnBuild, &out.OnBuild
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Mounts != nil {
+ in, out := &in.Mounts, &out.Mounts
+ *out = make([]Mount, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
+ *out = *in
+ if in.PortSpecs != nil {
+ in, out := &in.PortSpecs, &out.PortSpecs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExposedPorts != nil {
+ in, out := &in.ExposedPorts, &out.ExposedPorts
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Cmd != nil {
+ in, out := &in.Cmd, &out.Cmd
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Volumes != nil {
+ in, out := &in.Volumes, &out.Volumes
+ *out = make(map[string]struct{}, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Entrypoint != nil {
+ in, out := &in.Entrypoint, &out.Entrypoint
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityOpts != nil {
+ in, out := &in.SecurityOpts, &out.SecurityOpts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OnBuild != nil {
+ in, out := &in.OnBuild, &out.OnBuild
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
+func (in *DockerConfig) DeepCopy() *DockerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerImage) DeepCopyInto(out *DockerImage) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Created.DeepCopyInto(&out.Created)
+ in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(DockerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage.
+func (in *DockerImage) DeepCopy() *DockerImage {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerImage) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012.
+func (in *ImagePre012) DeepCopy() *ImagePre012 {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePre012)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mount) DeepCopyInto(out *Mount) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount.
+func (in *Mount) DeepCopy() *Mount {
+ if in == nil {
+ return nil
+ }
+ out := new(Mount)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..04900e809c
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,55 @@
+package dockerpre012
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Config = map[string]string{
+ "": "Config is the list of configuration options used when creating a container. Config does not contain the options that are specific to starting a container on a given host. Those are contained in HostConfig Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+ return map_Config
+}
+
+var map_DockerConfig = map[string]string{
+ "": "DockerConfig is the list of configuration options used when creating a container.",
+ "Labels": "This field is not supported in pre012 and will always be empty.",
+}
+
+func (DockerConfig) SwaggerDoc() map[string]string {
+ return map_DockerConfig
+}
+
+var map_DockerImage = map[string]string{
+ "": "DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the version of metadata that the container image registry uses to persist metadata.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (DockerImage) SwaggerDoc() map[string]string {
+ return map_DockerImage
+}
+
+var map_ImagePre012 = map[string]string{
+ "": "ImagePre012 serves the same purpose as the Image type except that it is for earlier versions of the Docker API (pre-012 to be specific) Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
+}
+
+func (ImagePre012) SwaggerDoc() map[string]string {
+ return map_ImagePre012
+}
+
+var map_Mount = map[string]string{
+ "": "Mount represents a mount point in the container.\n\nIt has been added in the version 1.20 of the Docker API, available since Docker 1.8. Exists only for legacy conversion, copy of type from fsouza/go-dockerclient",
+}
+
+func (Mount) SwaggerDoc() map[string]string {
+ return map_Mount
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/image/install.go b/vendor/github.com/openshift/api/image/install.go
new file mode 100644
index 0000000000..5b146faa7e
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/install.go
@@ -0,0 +1,26 @@
+package image
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ imagev1 "github.com/openshift/api/image/v1"
+)
+
+const (
+ GroupName = "image.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(imagev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/image/v1/consts.go b/vendor/github.com/openshift/api/image/v1/consts.go
new file mode 100644
index 0000000000..11f57a44a3
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/consts.go
@@ -0,0 +1,69 @@
+package v1
+
+import corev1 "k8s.io/api/core/v1"
+
+const (
+ // ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry.
+ ManagedByOpenShiftAnnotation = "openshift.io/image.managed"
+
+ // DockerImageRepositoryCheckAnnotation indicates that OpenShift has
+ // attempted to import tag and image information from an external Docker
+ // image repository.
+ DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck"
+
+ // InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content.
+ InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository"
+
+ // ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets.
+ ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret"
+
+ // DockerImageLayersOrderAnnotation describes layers order in the docker image.
+ DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder"
+
+ // DockerImageLayersOrderAscending indicates that image layers are sorted in
+ // the order of their addition (from oldest to latest)
+ DockerImageLayersOrderAscending = "ascending"
+
+ // DockerImageLayersOrderDescending indicates that layers are sorted in
+ // reversed order of their addition (from newest to oldest).
+ DockerImageLayersOrderDescending = "descending"
+
+ // ImporterPreferArchAnnotation represents an architecture that should be
+ // selected if an image uses a manifest list and it should be
+ // downconverted.
+ ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch"
+
+ // ImporterPreferOSAnnotation represents an operation system that should
+ // be selected if an image uses a manifest list and it should be
+ // downconverted.
+ ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os"
+
+ // ImageManifestBlobStoredAnnotation indicates that manifest and config blobs of image are stored in on
+ // storage of integrated Docker registry.
+ ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored"
+
+ // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use.
+ DefaultImageTag = "latest"
+
+ // ResourceImageStreams represents a number of image streams in a project.
+ ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams"
+
+ // ResourceImageStreamImages represents a number of unique references to images in all image stream
+ // statuses of a project.
+ ResourceImageStreamImages corev1.ResourceName = "openshift.io/images"
+
+ // ResourceImageStreamTags represents a number of unique references to images in all image stream specs
+ // of a project.
+ ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags"
+
+ // Limit that applies to images. Used with a max["storage"] LimitRangeItem to set
+ // the maximum size of an image.
+ LimitTypeImage corev1.LimitType = "openshift.io/Image"
+
+ // Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number
+ // of resource. Where the resource is one of "openshift.io/images" and "openshift.io/image-tags".
+ LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream"
+
+ // The supported type of image signature.
+ ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1"
+)
diff --git a/vendor/github.com/openshift/api/image/v1/doc.go b/vendor/github.com/openshift/api/image/v1/doc.go
new file mode 100644
index 0000000000..e57d45bbf9
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=image.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go
new file mode 100644
index 0000000000..ac776ad64d
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go
@@ -0,0 +1,11572 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/image/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *DockerImageReference) Reset() { *m = DockerImageReference{} }
+func (*DockerImageReference) ProtoMessage() {}
+func (*DockerImageReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{0}
+}
+func (m *DockerImageReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *DockerImageReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DockerImageReference.Merge(m, src)
+}
+func (m *DockerImageReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *DockerImageReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_DockerImageReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo
+
+func (m *Image) Reset() { *m = Image{} }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{1}
+}
+func (m *Image) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Image) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Image.Merge(m, src)
+}
+func (m *Image) XXX_Size() int {
+ return m.Size()
+}
+func (m *Image) XXX_DiscardUnknown() {
+ xxx_messageInfo_Image.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Image proto.InternalMessageInfo
+
+func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} }
+func (*ImageBlobReferences) ProtoMessage() {}
+func (*ImageBlobReferences) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{2}
+}
+func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageBlobReferences) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageBlobReferences.Merge(m, src)
+}
+func (m *ImageBlobReferences) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageBlobReferences) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo
+
+func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} }
+func (*ImageImportSpec) ProtoMessage() {}
+func (*ImageImportSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{3}
+}
+func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageImportSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageImportSpec.Merge(m, src)
+}
+func (m *ImageImportSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageImportSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageImportSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo
+
+func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} }
+func (*ImageImportStatus) ProtoMessage() {}
+func (*ImageImportStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{4}
+}
+func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageImportStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageImportStatus.Merge(m, src)
+}
+func (m *ImageImportStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageImportStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageImportStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo
+
+func (m *ImageLayer) Reset() { *m = ImageLayer{} }
+func (*ImageLayer) ProtoMessage() {}
+func (*ImageLayer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{5}
+}
+func (m *ImageLayer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageLayer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageLayer.Merge(m, src)
+}
+func (m *ImageLayer) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageLayer) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageLayer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageLayer proto.InternalMessageInfo
+
+func (m *ImageLayerData) Reset() { *m = ImageLayerData{} }
+func (*ImageLayerData) ProtoMessage() {}
+func (*ImageLayerData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{6}
+}
+func (m *ImageLayerData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageLayerData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageLayerData.Merge(m, src)
+}
+func (m *ImageLayerData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageLayerData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageLayerData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo
+
+func (m *ImageList) Reset() { *m = ImageList{} }
+func (*ImageList) ProtoMessage() {}
+func (*ImageList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{7}
+}
+func (m *ImageList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageList.Merge(m, src)
+}
+func (m *ImageList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageList proto.InternalMessageInfo
+
+func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} }
+func (*ImageLookupPolicy) ProtoMessage() {}
+func (*ImageLookupPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{8}
+}
+func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageLookupPolicy.Merge(m, src)
+}
+func (m *ImageLookupPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageLookupPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo
+
+func (m *ImageManifest) Reset() { *m = ImageManifest{} }
+func (*ImageManifest) ProtoMessage() {}
+func (*ImageManifest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{9}
+}
+func (m *ImageManifest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageManifest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageManifest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageManifest.Merge(m, src)
+}
+func (m *ImageManifest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageManifest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageManifest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageManifest proto.InternalMessageInfo
+
+func (m *ImageSignature) Reset() { *m = ImageSignature{} }
+func (*ImageSignature) ProtoMessage() {}
+func (*ImageSignature) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{10}
+}
+func (m *ImageSignature) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageSignature) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageSignature.Merge(m, src)
+}
+func (m *ImageSignature) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageSignature) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageSignature.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageSignature proto.InternalMessageInfo
+
+func (m *ImageStream) Reset() { *m = ImageStream{} }
+func (*ImageStream) ProtoMessage() {}
+func (*ImageStream) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{11}
+}
+func (m *ImageStream) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStream) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStream.Merge(m, src)
+}
+func (m *ImageStream) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStream) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStream.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStream proto.InternalMessageInfo
+
+func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} }
+func (*ImageStreamImage) ProtoMessage() {}
+func (*ImageStreamImage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{12}
+}
+func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamImage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamImage.Merge(m, src)
+}
+func (m *ImageStreamImage) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamImage) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo
+
+func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} }
+func (*ImageStreamImport) ProtoMessage() {}
+func (*ImageStreamImport) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{13}
+}
+func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamImport) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamImport.Merge(m, src)
+}
+func (m *ImageStreamImport) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamImport) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamImport.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo
+
+func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} }
+func (*ImageStreamImportSpec) ProtoMessage() {}
+func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{14}
+}
+func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamImportSpec.Merge(m, src)
+}
+func (m *ImageStreamImportSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamImportSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo
+
+func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} }
+func (*ImageStreamImportStatus) ProtoMessage() {}
+func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{15}
+}
+func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamImportStatus.Merge(m, src)
+}
+func (m *ImageStreamImportStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamImportStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo
+
+func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} }
+func (*ImageStreamLayers) ProtoMessage() {}
+func (*ImageStreamLayers) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{16}
+}
+func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamLayers) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamLayers.Merge(m, src)
+}
+func (m *ImageStreamLayers) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamLayers) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo
+
+func (m *ImageStreamList) Reset() { *m = ImageStreamList{} }
+func (*ImageStreamList) ProtoMessage() {}
+func (*ImageStreamList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{17}
+}
+func (m *ImageStreamList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamList.Merge(m, src)
+}
+func (m *ImageStreamList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo
+
+func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} }
+func (*ImageStreamMapping) ProtoMessage() {}
+func (*ImageStreamMapping) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{18}
+}
+func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamMapping) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamMapping.Merge(m, src)
+}
+func (m *ImageStreamMapping) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamMapping) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo
+
+func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} }
+func (*ImageStreamSpec) ProtoMessage() {}
+func (*ImageStreamSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{19}
+}
+func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamSpec.Merge(m, src)
+}
+func (m *ImageStreamSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo
+
+func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} }
+func (*ImageStreamStatus) ProtoMessage() {}
+func (*ImageStreamStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{20}
+}
+func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamStatus.Merge(m, src)
+}
+func (m *ImageStreamStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo
+
+func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} }
+func (*ImageStreamTag) ProtoMessage() {}
+func (*ImageStreamTag) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{21}
+}
+func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamTag) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamTag.Merge(m, src)
+}
+func (m *ImageStreamTag) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamTag) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamTag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo
+
+func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} }
+func (*ImageStreamTagList) ProtoMessage() {}
+func (*ImageStreamTagList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{22}
+}
+func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageStreamTagList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageStreamTagList.Merge(m, src)
+}
+func (m *ImageStreamTagList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageStreamTagList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo
+
+func (m *ImageTag) Reset() { *m = ImageTag{} }
+func (*ImageTag) ProtoMessage() {}
+func (*ImageTag) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{23}
+}
+func (m *ImageTag) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageTag) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageTag.Merge(m, src)
+}
+func (m *ImageTag) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageTag) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageTag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageTag proto.InternalMessageInfo
+
+func (m *ImageTagList) Reset() { *m = ImageTagList{} }
+func (*ImageTagList) ProtoMessage() {}
+func (*ImageTagList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{24}
+}
+func (m *ImageTagList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ImageTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ImageTagList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ImageTagList.Merge(m, src)
+}
+func (m *ImageTagList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ImageTagList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ImageTagList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageTagList proto.InternalMessageInfo
+
+func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} }
+func (*NamedTagEventList) ProtoMessage() {}
+func (*NamedTagEventList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{25}
+}
+func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedTagEventList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedTagEventList.Merge(m, src)
+}
+func (m *NamedTagEventList) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedTagEventList) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedTagEventList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo
+
+func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} }
+func (*RepositoryImportSpec) ProtoMessage() {}
+func (*RepositoryImportSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{26}
+}
+func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RepositoryImportSpec.Merge(m, src)
+}
+func (m *RepositoryImportSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *RepositoryImportSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo
+
+func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} }
+func (*RepositoryImportStatus) ProtoMessage() {}
+func (*RepositoryImportStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{27}
+}
+func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RepositoryImportStatus.Merge(m, src)
+}
+func (m *RepositoryImportStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *RepositoryImportStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo
+
+func (m *SecretList) Reset() { *m = SecretList{} }
+func (*SecretList) ProtoMessage() {}
+func (*SecretList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{28}
+}
+func (m *SecretList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SecretList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SecretList.Merge(m, src)
+}
+func (m *SecretList) XXX_Size() int {
+ return m.Size()
+}
+func (m *SecretList) XXX_DiscardUnknown() {
+ xxx_messageInfo_SecretList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretList proto.InternalMessageInfo
+
+func (m *SignatureCondition) Reset() { *m = SignatureCondition{} }
+func (*SignatureCondition) ProtoMessage() {}
+func (*SignatureCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{29}
+}
+func (m *SignatureCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SignatureCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureCondition.Merge(m, src)
+}
+func (m *SignatureCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo
+
+func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} }
+func (*SignatureGenericEntity) ProtoMessage() {}
+func (*SignatureGenericEntity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{30}
+}
+func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureGenericEntity.Merge(m, src)
+}
+func (m *SignatureGenericEntity) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureGenericEntity) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo
+
+func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} }
+func (*SignatureIssuer) ProtoMessage() {}
+func (*SignatureIssuer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{31}
+}
+func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SignatureIssuer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureIssuer.Merge(m, src)
+}
+func (m *SignatureIssuer) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureIssuer) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureIssuer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo
+
+func (m *SignatureSubject) Reset() { *m = SignatureSubject{} }
+func (*SignatureSubject) ProtoMessage() {}
+func (*SignatureSubject) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{32}
+}
+func (m *SignatureSubject) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SignatureSubject) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureSubject.Merge(m, src)
+}
+func (m *SignatureSubject) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureSubject) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureSubject.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo
+
+func (m *TagEvent) Reset() { *m = TagEvent{} }
+func (*TagEvent) ProtoMessage() {}
+func (*TagEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{33}
+}
+func (m *TagEvent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagEvent.Merge(m, src)
+}
+func (m *TagEvent) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagEvent proto.InternalMessageInfo
+
+func (m *TagEventCondition) Reset() { *m = TagEventCondition{} }
+func (*TagEventCondition) ProtoMessage() {}
+func (*TagEventCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{34}
+}
+func (m *TagEventCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagEventCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagEventCondition.Merge(m, src)
+}
+func (m *TagEventCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagEventCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagEventCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo
+
+func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} }
+func (*TagImportPolicy) ProtoMessage() {}
+func (*TagImportPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{35}
+}
+func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagImportPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagImportPolicy.Merge(m, src)
+}
+func (m *TagImportPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagImportPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagImportPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo
+
+func (m *TagReference) Reset() { *m = TagReference{} }
+func (*TagReference) ProtoMessage() {}
+func (*TagReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{36}
+}
+func (m *TagReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagReference.Merge(m, src)
+}
+func (m *TagReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagReference proto.InternalMessageInfo
+
+func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} }
+func (*TagReferencePolicy) ProtoMessage() {}
+func (*TagReferencePolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_650a0b34f65fde60, []int{37}
+}
+func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TagReferencePolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TagReferencePolicy.Merge(m, src)
+}
+func (m *TagReferencePolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *TagReferencePolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference")
+ proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image")
+ proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences")
+ proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec")
+ proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus")
+ proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer")
+ proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData")
+ proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList")
+ proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy")
+ proto.RegisterType((*ImageManifest)(nil), "github.com.openshift.api.image.v1.ImageManifest")
+ proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.ImageSignature.SignedClaimsEntry")
+ proto.RegisterType((*ImageStream)(nil), "github.com.openshift.api.image.v1.ImageStream")
+ proto.RegisterType((*ImageStreamImage)(nil), "github.com.openshift.api.image.v1.ImageStreamImage")
+ proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport")
+ proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec")
+ proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus")
+ proto.RegisterType((*ImageStreamLayers)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers")
+ proto.RegisterMapType((map[string]ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.BlobsEntry")
+ proto.RegisterMapType((map[string]ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.ImagesEntry")
+ proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList")
+ proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping")
+ proto.RegisterType((*ImageStreamSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamSpec")
+ proto.RegisterType((*ImageStreamStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamStatus")
+ proto.RegisterType((*ImageStreamTag)(nil), "github.com.openshift.api.image.v1.ImageStreamTag")
+ proto.RegisterType((*ImageStreamTagList)(nil), "github.com.openshift.api.image.v1.ImageStreamTagList")
+ proto.RegisterType((*ImageTag)(nil), "github.com.openshift.api.image.v1.ImageTag")
+ proto.RegisterType((*ImageTagList)(nil), "github.com.openshift.api.image.v1.ImageTagList")
+ proto.RegisterType((*NamedTagEventList)(nil), "github.com.openshift.api.image.v1.NamedTagEventList")
+ proto.RegisterType((*RepositoryImportSpec)(nil), "github.com.openshift.api.image.v1.RepositoryImportSpec")
+ proto.RegisterType((*RepositoryImportStatus)(nil), "github.com.openshift.api.image.v1.RepositoryImportStatus")
+ proto.RegisterType((*SecretList)(nil), "github.com.openshift.api.image.v1.SecretList")
+ proto.RegisterType((*SignatureCondition)(nil), "github.com.openshift.api.image.v1.SignatureCondition")
+ proto.RegisterType((*SignatureGenericEntity)(nil), "github.com.openshift.api.image.v1.SignatureGenericEntity")
+ proto.RegisterType((*SignatureIssuer)(nil), "github.com.openshift.api.image.v1.SignatureIssuer")
+ proto.RegisterType((*SignatureSubject)(nil), "github.com.openshift.api.image.v1.SignatureSubject")
+ proto.RegisterType((*TagEvent)(nil), "github.com.openshift.api.image.v1.TagEvent")
+ proto.RegisterType((*TagEventCondition)(nil), "github.com.openshift.api.image.v1.TagEventCondition")
+ proto.RegisterType((*TagImportPolicy)(nil), "github.com.openshift.api.image.v1.TagImportPolicy")
+ proto.RegisterType((*TagReference)(nil), "github.com.openshift.api.image.v1.TagReference")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.TagReference.AnnotationsEntry")
+ proto.RegisterType((*TagReferencePolicy)(nil), "github.com.openshift.api.image.v1.TagReferencePolicy")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptor_650a0b34f65fde60)
+}
+
+var fileDescriptor_650a0b34f65fde60 = []byte{
+ // 2691 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7,
+ 0x15, 0xf6, 0xf2, 0x4f, 0xd4, 0x13, 0x25, 0x59, 0x63, 0xcb, 0x61, 0x68, 0x47, 0x92, 0xd7, 0xb5,
+ 0xe1, 0x34, 0x0e, 0x19, 0xa9, 0x4e, 0x2a, 0xbb, 0x40, 0x1d, 0xd3, 0x74, 0x0d, 0xb6, 0x62, 0xac,
+ 0x8c, 0x58, 0xa3, 0x35, 0x5c, 0xa0, 0xab, 0xe5, 0x68, 0xb5, 0x15, 0xb9, 0xcb, 0xee, 0x2e, 0x95,
+ 0xc8, 0x68, 0x81, 0xa2, 0x28, 0x82, 0x1c, 0x7a, 0x68, 0xcf, 0x39, 0x16, 0x41, 0x51, 0x14, 0xe8,
+ 0xa5, 0x68, 0xd0, 0x53, 0x2f, 0x4d, 0x01, 0xa3, 0xa7, 0x20, 0xe8, 0x21, 0x97, 0x0a, 0xb1, 0xda,
+ 0x73, 0x6f, 0xbd, 0xf8, 0x54, 0xcc, 0xcf, 0xfe, 0x72, 0x29, 0xed, 0xaa, 0x16, 0xdb, 0xdc, 0xc8,
+ 0x79, 0xef, 0x7d, 0x6f, 0xe6, 0xbd, 0x37, 0xef, 0xbd, 0x99, 0x59, 0x58, 0xd6, 0x74, 0x67, 0x7b,
+ 0xb0, 0x59, 0x55, 0xcd, 0x5e, 0xcd, 0xec, 0x13, 0xc3, 0xde, 0xd6, 0xb7, 0x9c, 0x9a, 0xd2, 0xd7,
+ 0x6b, 0x7a, 0x4f, 0xd1, 0x48, 0x6d, 0x77, 0xb9, 0xa6, 0x11, 0x83, 0x58, 0x8a, 0x43, 0x3a, 0xd5,
+ 0xbe, 0x65, 0x3a, 0x26, 0xba, 0xe8, 0x8b, 0x54, 0x3d, 0x91, 0xaa, 0xd2, 0xd7, 0xab, 0x4c, 0xa4,
+ 0xba, 0xbb, 0x5c, 0x79, 0x35, 0x80, 0xaa, 0x99, 0x9a, 0x59, 0x63, 0x92, 0x9b, 0x83, 0x2d, 0xf6,
+ 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x62, 0x45, 0xde, 0x59, 0xb5, 0xab, 0xba, 0xc9, 0xd4, 0xaa, 0xa6,
+ 0x15, 0xa7, 0xb5, 0x72, 0xdd, 0xe7, 0xe9, 0x29, 0xea, 0xb6, 0x6e, 0x10, 0x6b, 0xaf, 0xd6, 0xdf,
+ 0xd1, 0xe8, 0x80, 0x5d, 0xeb, 0x11, 0x47, 0x89, 0x93, 0xaa, 0x8d, 0x92, 0xb2, 0x06, 0x86, 0xa3,
+ 0xf7, 0xc8, 0x90, 0xc0, 0x1b, 0x47, 0x09, 0xd8, 0xea, 0x36, 0xe9, 0x29, 0x51, 0x39, 0xf9, 0x53,
+ 0x09, 0xce, 0x36, 0x4c, 0x75, 0x87, 0x58, 0x4d, 0x6a, 0x04, 0x4c, 0xb6, 0x88, 0x45, 0x0c, 0x95,
+ 0xa0, 0x6b, 0x50, 0xb4, 0x88, 0xa6, 0xdb, 0x8e, 0xb5, 0x57, 0x96, 0x96, 0xa4, 0xab, 0x93, 0xf5,
+ 0xd3, 0x4f, 0xf6, 0x17, 0x4f, 0x1d, 0xec, 0x2f, 0x16, 0xb1, 0x18, 0xc7, 0x1e, 0x07, 0xaa, 0xc1,
+ 0xa4, 0xa1, 0xf4, 0x88, 0xdd, 0x57, 0x54, 0x52, 0xce, 0x30, 0xf6, 0x39, 0xc1, 0x3e, 0xf9, 0x96,
+ 0x4b, 0xc0, 0x3e, 0x0f, 0x5a, 0x82, 0x1c, 0xfd, 0x53, 0xce, 0x32, 0xde, 0x92, 0xe0, 0xcd, 0x51,
+ 0x5e, 0xcc, 0x28, 0xe8, 0x25, 0xc8, 0x3a, 0x8a, 0x56, 0xce, 0x31, 0x86, 0x29, 0xc1, 0x90, 0x6d,
+ 0x2b, 0x1a, 0xa6, 0xe3, 0xa8, 0x02, 0x19, 0xbd, 0x51, 0xce, 0x33, 0x2a, 0x08, 0x6a, 0xa6, 0xd9,
+ 0xc0, 0x19, 0xbd, 0x21, 0xff, 0xad, 0x08, 0x79, 0xb6, 0x1c, 0xf4, 0x7d, 0x28, 0x52, 0x13, 0x77,
+ 0x14, 0x47, 0x61, 0xab, 0x98, 0x5a, 0x79, 0xad, 0xca, 0x2d, 0x55, 0x0d, 0x5a, 0xaa, 0xda, 0xdf,
+ 0xd1, 0xe8, 0x80, 0x5d, 0xa5, 0xdc, 0xd5, 0xdd, 0xe5, 0xea, 0xfd, 0xcd, 0x1f, 0x10, 0xd5, 0x69,
+ 0x11, 0x47, 0xa9, 0x23, 0x81, 0x0e, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x3a, 0x9c, 0xed, 0xc4, 0xd8,
+ 0x4f, 0x18, 0xe1, 0x82, 0x90, 0x8d, 0xb5, 0x31, 0x8e, 0x95, 0x44, 0x3f, 0x82, 0x33, 0x81, 0xf1,
+ 0x96, 0x3b, 0xfd, 0x2c, 0x9b, 0xfe, 0xab, 0x23, 0xa7, 0x2f, 0x1c, 0x5d, 0xc5, 0xca, 0x3b, 0x77,
+ 0xdf, 0x75, 0x88, 0x61, 0xeb, 0xa6, 0x51, 0x3f, 0x2f, 0xf4, 0x9f, 0x69, 0x0c, 0x23, 0xe2, 0x38,
+ 0x35, 0x68, 0x13, 0x2a, 0x31, 0xc3, 0x0f, 0x88, 0x45, 0xf1, 0x84, 0x37, 0x64, 0x81, 0x5a, 0x69,
+ 0x8c, 0xe4, 0xc4, 0x87, 0xa0, 0xa0, 0x56, 0x78, 0x85, 0x8a, 0xa1, 0x6f, 0x11, 0xdb, 0x11, 0xce,
+ 0x8c, 0x9d, 0xb2, 0x60, 0xc1, 0x71, 0x72, 0x68, 0x17, 0xe6, 0x02, 0xc3, 0x6b, 0xca, 0x1e, 0xb1,
+ 0xec, 0x72, 0x61, 0x29, 0xcb, 0xcc, 0x75, 0xe4, 0xa6, 0xaf, 0xfa, 0x52, 0xf5, 0x17, 0x85, 0xee,
+ 0xb9, 0x46, 0x14, 0x0f, 0x0f, 0xab, 0x40, 0x04, 0xc0, 0xd6, 0x35, 0x43, 0x71, 0x06, 0x16, 0xb1,
+ 0xcb, 0x13, 0x4c, 0xe1, 0x72, 0x52, 0x85, 0x1b, 0xae, 0xa4, 0x1f, 0x5f, 0xde, 0x90, 0x8d, 0x03,
+ 0xc0, 0xe8, 0x3e, 0xcc, 0x07, 0x74, 0xfb, 0x4c, 0xe5, 0xe2, 0x52, 0xf6, 0x6a, 0xa9, 0xfe, 0xe2,
+ 0xc1, 0xfe, 0xe2, 0x7c, 0x23, 0x8e, 0x01, 0xc7, 0xcb, 0xa1, 0x6d, 0xb8, 0x10, 0x63, 0xc6, 0x16,
+ 0xe9, 0xe8, 0x4a, 0x7b, 0xaf, 0x4f, 0xca, 0x93, 0xcc, 0x0f, 0x5f, 0x12, 0xd3, 0xba, 0xd0, 0x38,
+ 0x84, 0x17, 0x1f, 0x8a, 0x84, 0xee, 0x85, 0x3c, 0x73, 0xc7, 0x34, 0xb6, 0x74, 0xad, 0x0c, 0x0c,
+ 0x3e, 0xce, 0xd4, 0x9c, 0x01, 0x0f, 0xcb, 0xa0, 0x9f, 0x4a, 0xa1, 0x6d, 0xe6, 0x6a, 0xb2, 0xcb,
+ 0x53, 0xcc, 0xea, 0xaf, 0x25, 0xb5, 0xba, 0x2b, 0x18, 0xbb, 0x31, 0x3d, 0x54, 0x1c, 0xab, 0x4b,
+ 0xfe, 0x58, 0x82, 0x33, 0x6c, 0xa8, 0xde, 0x35, 0x37, 0xbd, 0xfd, 0x6a, 0xa3, 0x55, 0x28, 0x31,
+ 0x2d, 0x2d, 0xdd, 0xb6, 0x75, 0x43, 0x63, 0x3b, 0xb5, 0x58, 0x3f, 0x2b, 0x34, 0x94, 0x9a, 0x01,
+ 0x1a, 0x0e, 0x71, 0x22, 0x19, 0x0a, 0x5d, 0x1e, 0xae, 0xd2, 0x52, 0x96, 0x26, 0xb2, 0x83, 0xfd,
+ 0xc5, 0x82, 0x08, 0x38, 0x41, 0xa1, 0x3c, 0x2a, 0x37, 0x1c, 0x4f, 0x29, 0x8c, 0x47, 0x58, 0x4a,
+ 0x50, 0xd0, 0x2b, 0x30, 0xd9, 0xf3, 0x4c, 0x92, 0x63, 0x50, 0xd3, 0x34, 0xf5, 0xfa, 0x2b, 0xf2,
+ 0xe9, 0xf2, 0x5f, 0xb2, 0x30, 0xcb, 0xe6, 0xd4, 0xec, 0xf5, 0x4d, 0xcb, 0xd9, 0xe8, 0x13, 0x15,
+ 0xdd, 0x85, 0xdc, 0x96, 0x65, 0xf6, 0x44, 0x8e, 0xbc, 0x14, 0x48, 0x32, 0x55, 0x5a, 0xd8, 0xfc,
+ 0x8c, 0xe8, 0x2d, 0xdb, 0xcf, 0xd9, 0xdf, 0xb0, 0xcc, 0x1e, 0x66, 0xe2, 0xe8, 0x4d, 0xc8, 0x38,
+ 0x26, 0x9b, 0xe7, 0xd4, 0xca, 0xd5, 0x38, 0x90, 0x35, 0x53, 0x55, 0xba, 0x51, 0xa4, 0x02, 0x4d,
+ 0xdd, 0x6d, 0x13, 0x67, 0x1c, 0x13, 0x75, 0xa9, 0x2d, 0xe9, 0xb4, 0xd6, 0xcd, 0xae, 0xae, 0xee,
+ 0x89, 0xac, 0xb7, 0x92, 0xc0, 0xbf, 0x6d, 0x45, 0x6b, 0x06, 0x24, 0x83, 0xf6, 0xf7, 0x47, 0x71,
+ 0x08, 0x1d, 0xbd, 0x0b, 0xb3, 0x96, 0x3b, 0x0d, 0xa1, 0x30, 0xcf, 0x14, 0xbe, 0x9e, 0x4c, 0x21,
+ 0x0e, 0x0b, 0xd7, 0x5f, 0x10, 0x3a, 0x67, 0x23, 0x04, 0x1c, 0x55, 0x83, 0x6e, 0xc3, 0xac, 0x6e,
+ 0xa8, 0xdd, 0x41, 0xc7, 0x4f, 0x7f, 0x39, 0x16, 0x36, 0x1e, 0x44, 0x33, 0x4c, 0xc6, 0x51, 0x7e,
+ 0xf9, 0x77, 0x19, 0x98, 0x0b, 0xfa, 0xd1, 0x51, 0x9c, 0x81, 0x8d, 0xda, 0x50, 0xb0, 0xd9, 0x2f,
+ 0xe1, 0xcb, 0x6b, 0xc9, 0xea, 0x1d, 0x97, 0xae, 0xcf, 0x08, 0xed, 0x05, 0xfe, 0x1f, 0x0b, 0x2c,
+ 0xd4, 0x84, 0x3c, 0x5b, 0xb7, 0xe7, 0xdb, 0x84, 0xfb, 0xad, 0x3e, 0x79, 0xb0, 0xbf, 0xc8, 0x6b,
+ 0x31, 0xe6, 0x08, 0x6e, 0x5d, 0xcf, 0x8e, 0xa8, 0xeb, 0xdf, 0x8d, 0x86, 0x72, 0x1a, 0x6d, 0x5e,
+ 0xcf, 0x11, 0x1b, 0xf8, 0xef, 0x49, 0x00, 0x7e, 0xfe, 0xf6, 0x5a, 0x10, 0x69, 0x64, 0x0b, 0x72,
+ 0x19, 0x72, 0xb6, 0xfe, 0x98, 0x2f, 0x3a, 0xeb, 0x83, 0x33, 0xf1, 0x0d, 0xfd, 0x31, 0xc1, 0x8c,
+ 0x4c, 0x9b, 0x9f, 0x9e, 0x97, 0x3c, 0xb3, 0xe1, 0xe6, 0xc7, 0xcf, 0x94, 0x3e, 0x8f, 0xdc, 0x81,
+ 0x19, 0x7f, 0x1e, 0x0d, 0x5a, 0x75, 0x2f, 0x0a, 0x4d, 0x12, 0xd3, 0x34, 0x7d, 0xa4, 0x96, 0x4c,
+ 0x02, 0x2d, 0x7f, 0x94, 0x60, 0x92, 0xab, 0xd1, 0x6d, 0x07, 0x3d, 0x1a, 0xea, 0x84, 0xaa, 0xc9,
+ 0x22, 0x83, 0x4a, 0xb3, 0x3e, 0xc8, 0xeb, 0xff, 0xdc, 0x91, 0x40, 0x17, 0xd4, 0x82, 0xbc, 0xee,
+ 0x90, 0x9e, 0x5d, 0xce, 0xa4, 0xf4, 0xd8, 0xb4, 0x00, 0xcd, 0x37, 0xa9, 0x38, 0xe6, 0x28, 0xf2,
+ 0xaa, 0x88, 0xec, 0x35, 0xd3, 0xdc, 0x19, 0xf4, 0xc5, 0x96, 0xb9, 0x04, 0xf9, 0x2e, 0x4d, 0x1f,
+ 0x22, 0xbf, 0x7a, 0x92, 0x2c, 0xa7, 0x60, 0x4e, 0x93, 0x7f, 0x95, 0x81, 0xe9, 0x70, 0x77, 0x70,
+ 0x05, 0x0a, 0x1d, 0x5d, 0xa3, 0x1b, 0x8c, 0x3b, 0xda, 0x0b, 0xf1, 0x06, 0x1b, 0xc5, 0x82, 0x9a,
+ 0xda, 0xbe, 0x34, 0xed, 0xbb, 0xb1, 0x45, 0xdd, 0xc4, 0xa6, 0x95, 0xf5, 0xd3, 0x4e, 0x2b, 0x40,
+ 0xc3, 0x21, 0x4e, 0x2a, 0xa9, 0x58, 0xea, 0xb6, 0xee, 0x10, 0x95, 0x56, 0x64, 0xd1, 0x55, 0x79,
+ 0x92, 0xb7, 0x03, 0x34, 0x1c, 0xe2, 0xa4, 0x5d, 0xaf, 0x69, 0x47, 0xbb, 0xde, 0xfb, 0x1b, 0x38,
+ 0x63, 0xda, 0xe8, 0x65, 0x98, 0xd8, 0x55, 0x2c, 0x5d, 0x31, 0x9c, 0x72, 0x81, 0x31, 0xcc, 0x0a,
+ 0x86, 0x89, 0x07, 0x7c, 0x18, 0xbb, 0x74, 0xf9, 0xf7, 0x05, 0x11, 0x81, 0x5e, 0x57, 0x30, 0x86,
+ 0x4e, 0x79, 0x09, 0x72, 0x8e, 0x6f, 0x5b, 0x6f, 0xbf, 0x31, 0xb3, 0x32, 0x0a, 0xba, 0x0c, 0x13,
+ 0xaa, 0x69, 0x38, 0xc4, 0x70, 0x98, 0x31, 0x4b, 0xf5, 0x29, 0x3a, 0xfb, 0x3b, 0x7c, 0x08, 0xbb,
+ 0x34, 0xa4, 0x03, 0xa8, 0xa6, 0xd1, 0xd1, 0x1d, 0xdd, 0x34, 0xdc, 0x1c, 0x91, 0x24, 0x61, 0x7b,
+ 0x8b, 0xbd, 0xe3, 0x4a, 0xfb, 0x33, 0xf6, 0x86, 0x6c, 0x1c, 0x00, 0x47, 0x5f, 0x83, 0x69, 0x26,
+ 0xde, 0xec, 0x10, 0xc3, 0xd1, 0x9d, 0x3d, 0x61, 0xfa, 0x79, 0x21, 0xc6, 0x43, 0xcd, 0x25, 0xe2,
+ 0x30, 0x2f, 0xfa, 0x31, 0x94, 0x68, 0x1b, 0x47, 0x3a, 0x77, 0xba, 0x8a, 0xde, 0x73, 0x5b, 0xd2,
+ 0x3b, 0xa9, 0x3b, 0x44, 0x36, 0x71, 0x17, 0xe5, 0xae, 0xe1, 0x58, 0x81, 0xe2, 0x16, 0x24, 0xe1,
+ 0x90, 0x3a, 0xf4, 0x36, 0x4c, 0xa8, 0x16, 0xa1, 0x67, 0xbd, 0xf2, 0x04, 0x73, 0xe8, 0x97, 0x93,
+ 0x39, 0xb4, 0xad, 0xf7, 0x88, 0xb0, 0x3c, 0x17, 0xc7, 0x2e, 0x0e, 0x4d, 0x22, 0xba, 0x6d, 0x0f,
+ 0x48, 0xa7, 0xbe, 0x57, 0x2e, 0x26, 0xae, 0xcc, 0xde, 0x42, 0x9a, 0x54, 0xd6, 0xaa, 0x97, 0x68,
+ 0x12, 0x69, 0x0a, 0x1c, 0xec, 0x21, 0xa2, 0xef, 0xb9, 0xe8, 0x6d, 0x93, 0xf5, 0xa0, 0x53, 0x2b,
+ 0x5f, 0x49, 0x83, 0xbe, 0x31, 0x60, 0x51, 0x17, 0x84, 0x6f, 0x9b, 0xd8, 0x83, 0xac, 0xdc, 0x82,
+ 0xb9, 0x21, 0x43, 0xa2, 0xd3, 0x90, 0xdd, 0x21, 0xe2, 0x84, 0x8b, 0xe9, 0x4f, 0x74, 0x16, 0xf2,
+ 0xbb, 0x4a, 0x77, 0x20, 0xe2, 0x14, 0xf3, 0x3f, 0x37, 0x33, 0xab, 0x12, 0xcd, 0x2d, 0x53, 0xdc,
+ 0x33, 0x8e, 0x45, 0x94, 0xde, 0x18, 0xb6, 0x4c, 0x1b, 0x72, 0x76, 0x9f, 0xa8, 0xa2, 0xea, 0xae,
+ 0x24, 0x8e, 0x1c, 0x36, 0x3f, 0xda, 0xd8, 0xf9, 0xdb, 0x8c, 0xfe, 0xc3, 0x0c, 0x0d, 0x3d, 0xf2,
+ 0x5a, 0x04, 0xde, 0x5d, 0x5d, 0x4f, 0x89, 0x7b, 0x68, 0xab, 0x20, 0xff, 0x59, 0x82, 0xd3, 0x01,
+ 0xee, 0x71, 0x9d, 0xc3, 0x5b, 0xc7, 0xed, 0x50, 0xfc, 0x0a, 0x14, 0xe8, 0x52, 0xe4, 0x3f, 0xb8,
+ 0xcd, 0x95, 0xbb, 0x0a, 0xda, 0x62, 0x8d, 0x61, 0x19, 0x0f, 0x43, 0x1e, 0x5f, 0x4d, 0xe7, 0x19,
+ 0xbf, 0xa1, 0x8f, 0xf5, 0xfb, 0x66, 0xc4, 0xef, 0x37, 0x8f, 0x85, 0x7e, 0xb8, 0xf7, 0x7f, 0x96,
+ 0x81, 0xf9, 0xd8, 0x19, 0xd1, 0x3a, 0xcc, 0x7b, 0x6f, 0x66, 0xb9, 0xa2, 0x8f, 0xc0, 0x79, 0xb0,
+ 0xa0, 0x22, 0x0d, 0xc0, 0x22, 0x7d, 0xd3, 0xd6, 0x1d, 0xd3, 0xda, 0x13, 0x76, 0xf8, 0x6a, 0x82,
+ 0x99, 0x62, 0x4f, 0x28, 0x60, 0x86, 0x19, 0x6a, 0x68, 0x9f, 0x82, 0x03, 0xd0, 0xe8, 0x21, 0x9d,
+ 0x90, 0xa2, 0x11, 0x6a, 0x8e, 0x6c, 0x9a, 0xed, 0x15, 0xc4, 0xf7, 0x17, 0x41, 0x91, 0xb0, 0x40,
+ 0x94, 0x3f, 0xca, 0xc0, 0x0b, 0x23, 0x4c, 0x87, 0x70, 0xc8, 0x10, 0xb4, 0x0f, 0x4b, 0xe5, 0x06,
+ 0x7e, 0x00, 0x8c, 0x18, 0x4d, 0x8f, 0x31, 0xda, 0x8d, 0xe3, 0x18, 0x4d, 0x78, 0xf7, 0x10, 0xb3,
+ 0x3d, 0x8a, 0x98, 0xed, 0x7a, 0x4a, 0xb3, 0x45, 0xe2, 0x27, 0x62, 0xb8, 0x0f, 0x73, 0xa1, 0x7d,
+ 0x27, 0x6e, 0x5a, 0x4e, 0x7e, 0xdf, 0x75, 0x20, 0xbf, 0xd9, 0x35, 0x37, 0xdd, 0x06, 0xf6, 0x56,
+ 0x3a, 0x9f, 0xf0, 0x69, 0x56, 0xeb, 0x14, 0x81, 0x17, 0x68, 0x2f, 0xab, 0xb0, 0x31, 0xcc, 0xc1,
+ 0xd1, 0x76, 0xc4, 0x76, 0x6f, 0x1e, 0x4b, 0x0d, 0x37, 0x19, 0xd7, 0x33, 0xc2, 0x8e, 0x95, 0x1d,
+ 0x00, 0x7f, 0x36, 0x31, 0x55, 0xee, 0x5e, 0xb0, 0xca, 0xa5, 0xb8, 0xb6, 0xf2, 0x8e, 0x2c, 0x81,
+ 0xc2, 0x58, 0xf9, 0xa1, 0xa8, 0x8b, 0x23, 0xb5, 0xad, 0x85, 0xb5, 0xbd, 0x91, 0x38, 0x39, 0x87,
+ 0x2e, 0x5a, 0x82, 0xb5, 0xf8, 0x63, 0x49, 0x5c, 0x62, 0x08, 0xcb, 0x9c, 0xfc, 0x11, 0x67, 0x23,
+ 0x7c, 0xc4, 0x49, 0xbb, 0x6b, 0xe3, 0x0f, 0x3a, 0xff, 0x94, 0x00, 0x05, 0xb8, 0x5a, 0x4a, 0xbf,
+ 0xaf, 0x1b, 0xda, 0x17, 0xae, 0x5c, 0x1e, 0x71, 0xa8, 0x97, 0x7f, 0x93, 0x09, 0x79, 0x8b, 0xd5,
+ 0x03, 0x03, 0x4a, 0xdd, 0xc0, 0xf1, 0x2e, 0x6d, 0x2f, 0x12, 0x3c, 0x1a, 0xfa, 0xed, 0x70, 0x70,
+ 0x14, 0x87, 0xf0, 0xd1, 0x46, 0xe8, 0x1a, 0xd5, 0x4f, 0x6e, 0xe2, 0x58, 0xf8, 0x92, 0x80, 0x98,
+ 0x6f, 0xc4, 0x31, 0xe1, 0x78, 0x59, 0xf4, 0x36, 0xe4, 0x1c, 0x45, 0x73, 0x63, 0xa2, 0x96, 0xf2,
+ 0xd6, 0x28, 0x70, 0x08, 0x52, 0x34, 0x1b, 0x33, 0x28, 0xf9, 0xd7, 0xe1, 0xce, 0x43, 0x14, 0x8d,
+ 0x13, 0x99, 0x3d, 0x81, 0xf3, 0xfd, 0xc1, 0x66, 0x57, 0x57, 0x63, 0xa5, 0x84, 0x37, 0x2f, 0x09,
+ 0xe8, 0xf3, 0xeb, 0xa3, 0x59, 0xf1, 0x61, 0x38, 0xe8, 0x41, 0xc8, 0x48, 0x49, 0x3c, 0xfc, 0x96,
+ 0xd2, 0x23, 0x9d, 0xb6, 0xa2, 0xdd, 0xdd, 0x25, 0x86, 0x43, 0xf7, 0x62, 0xac, 0xa5, 0x3e, 0xc8,
+ 0xb9, 0xa7, 0x58, 0x66, 0xa9, 0xb6, 0x32, 0x8e, 0x8d, 0xf3, 0x4d, 0x1e, 0xe9, 0x7c, 0xdb, 0xa4,
+ 0x76, 0xf8, 0x44, 0xe8, 0xae, 0x6b, 0x05, 0x40, 0xbc, 0xc7, 0xe9, 0xa6, 0x21, 0xee, 0x0f, 0x3c,
+ 0xed, 0xf7, 0x3c, 0x0a, 0x0e, 0x70, 0x0d, 0x6d, 0x9b, 0xc2, 0x09, 0x6f, 0x9b, 0xed, 0x98, 0xc3,
+ 0xf6, 0xf5, 0x64, 0xcb, 0x66, 0xde, 0x4b, 0x7e, 0xd6, 0xf6, 0x52, 0x52, 0xfe, 0xb9, 0x74, 0xf0,
+ 0x7f, 0x0d, 0xa7, 0xd6, 0xb6, 0xa2, 0x8d, 0xa1, 0x48, 0x3c, 0x08, 0x17, 0x89, 0xe5, 0x74, 0x45,
+ 0xa2, 0xad, 0x68, 0x23, 0xea, 0xc4, 0xe7, 0x19, 0x28, 0x32, 0xc6, 0xf1, 0x04, 0x79, 0x2b, 0x74,
+ 0x0a, 0x49, 0x1d, 0xe5, 0xc5, 0xc8, 0xc1, 0xe3, 0x3b, 0xc7, 0x38, 0x70, 0x0e, 0xa7, 0x00, 0x38,
+ 0xec, 0x5e, 0x3a, 0xf7, 0xdf, 0xde, 0x4b, 0xcb, 0x7f, 0x92, 0xa0, 0xe4, 0x9a, 0x78, 0x0c, 0x91,
+ 0xb2, 0x1e, 0x8e, 0x94, 0x57, 0x92, 0xce, 0x7c, 0x74, 0x8c, 0xfc, 0x4b, 0x82, 0xb9, 0x21, 0xab,
+ 0xb9, 0x95, 0x59, 0x1a, 0x71, 0xdd, 0x7e, 0x8c, 0x69, 0xb8, 0xf0, 0xf1, 0xd3, 0x88, 0x24, 0x8c,
+ 0xec, 0xc9, 0x25, 0x0c, 0xf9, 0xfd, 0x2c, 0x9c, 0x8d, 0x3b, 0xf5, 0x3d, 0xaf, 0xd7, 0xac, 0xe8,
+ 0x5b, 0x54, 0x66, 0xdc, 0x6f, 0x51, 0xb9, 0xff, 0xd9, 0x5b, 0x54, 0x36, 0xe5, 0x5b, 0xd4, 0xfb,
+ 0x19, 0x38, 0x17, 0x7f, 0x96, 0x3c, 0xa1, 0x07, 0x29, 0xff, 0x14, 0x9a, 0x79, 0xfe, 0xa7, 0x50,
+ 0x74, 0x13, 0x66, 0x94, 0x0e, 0x0f, 0x33, 0xa5, 0x4b, 0x3b, 0x0e, 0x16, 0xc7, 0x93, 0x75, 0x74,
+ 0xb0, 0xbf, 0x38, 0x73, 0x3b, 0x44, 0xc1, 0x11, 0x4e, 0xf9, 0xb7, 0x12, 0xc0, 0x06, 0x51, 0x2d,
+ 0xe2, 0x8c, 0x21, 0x8b, 0xdc, 0x0a, 0x6f, 0xdf, 0x4a, 0x5c, 0xa8, 0xf3, 0xc9, 0x8c, 0x48, 0x1a,
+ 0x9f, 0x66, 0x01, 0x0d, 0xdf, 0x8b, 0xa3, 0x9b, 0xe2, 0xae, 0x9e, 0xa7, 0x8d, 0x2b, 0xc1, 0xbb,
+ 0xfa, 0x67, 0xfb, 0x8b, 0xe7, 0x86, 0x25, 0x02, 0xb7, 0xf8, 0x6b, 0x9e, 0xc3, 0xf9, 0x4d, 0xff,
+ 0xf5, 0xb0, 0x0b, 0x9f, 0xed, 0x2f, 0xc6, 0x7c, 0x37, 0x55, 0xf5, 0x90, 0x22, 0x8e, 0xd6, 0x60,
+ 0xba, 0xab, 0xd8, 0xce, 0xba, 0x65, 0x6e, 0x92, 0xb6, 0x2e, 0xbe, 0x18, 0x4a, 0x77, 0x97, 0xed,
+ 0xdd, 0xd6, 0xaf, 0x05, 0x81, 0x70, 0x18, 0x17, 0xed, 0x02, 0xa2, 0x03, 0x6d, 0x4b, 0x31, 0x6c,
+ 0xbe, 0x24, 0xaa, 0x2d, 0x97, 0x5a, 0x5b, 0x45, 0x68, 0x43, 0x6b, 0x43, 0x68, 0x38, 0x46, 0x03,
+ 0xba, 0x02, 0x05, 0x8b, 0x28, 0xb6, 0x69, 0x88, 0xb7, 0x05, 0x2f, 0x26, 0x31, 0x1b, 0xc5, 0x82,
+ 0x8a, 0x5e, 0x86, 0x89, 0x1e, 0xb1, 0x6d, 0x5a, 0xec, 0x22, 0xcf, 0x3b, 0x2d, 0x3e, 0x8c, 0x5d,
+ 0xba, 0xfc, 0x9e, 0x04, 0xbe, 0x8b, 0x58, 0x1f, 0xa9, 0xab, 0x77, 0xf9, 0x9b, 0xc4, 0x2a, 0x94,
+ 0x4c, 0x4b, 0x53, 0x0c, 0xfd, 0x31, 0x6f, 0x3a, 0xa5, 0xf0, 0xd3, 0xd3, 0xfd, 0x00, 0x0d, 0x87,
+ 0x38, 0x69, 0xb3, 0xaa, 0x9a, 0xbd, 0x9e, 0x69, 0xd0, 0x1a, 0x23, 0x5c, 0x1b, 0xc8, 0xd0, 0x2e,
+ 0x05, 0x07, 0xb8, 0xe4, 0x0f, 0x25, 0x98, 0x8d, 0xdc, 0xfe, 0xa3, 0x5f, 0x4a, 0x70, 0xce, 0x8e,
+ 0x9d, 0x9c, 0xd8, 0x1f, 0x37, 0xd2, 0x5c, 0xfa, 0x87, 0x00, 0xea, 0x0b, 0x62, 0x3e, 0x23, 0x56,
+ 0x8f, 0x47, 0x28, 0x96, 0xff, 0x2e, 0xc1, 0xe9, 0xe8, 0x3b, 0xc2, 0xff, 0xe3, 0x44, 0xd1, 0xeb,
+ 0x30, 0xc5, 0x4f, 0x5a, 0xdf, 0x22, 0x7b, 0xcd, 0x86, 0xf0, 0xc2, 0x19, 0x01, 0x36, 0xb5, 0xee,
+ 0x93, 0x70, 0x90, 0x4f, 0xfe, 0x79, 0x06, 0x8a, 0x6e, 0x7d, 0x45, 0xdf, 0xf6, 0xdf, 0x85, 0xa4,
+ 0xd4, 0xd1, 0xed, 0x05, 0xdd, 0xd0, 0xdb, 0xd0, 0xf3, 0xff, 0x10, 0xee, 0x92, 0xdb, 0xdc, 0xf1,
+ 0x83, 0x68, 0xfc, 0xcd, 0x43, 0xf8, 0x0c, 0x95, 0x4b, 0x72, 0x86, 0x92, 0x3f, 0xc8, 0xc2, 0xdc,
+ 0x50, 0xbb, 0x81, 0x6e, 0x84, 0x72, 0xde, 0xe5, 0x48, 0xce, 0x9b, 0x1f, 0x12, 0x38, 0xb1, 0x94,
+ 0x17, 0x9f, 0x89, 0xb2, 0x63, 0xcc, 0x44, 0xb9, 0xa4, 0x99, 0x28, 0x7f, 0x78, 0x26, 0x8a, 0x78,
+ 0xa7, 0x90, 0xc8, 0x3b, 0x1f, 0x49, 0x30, 0x1b, 0x69, 0xa0, 0xd0, 0x35, 0x28, 0xea, 0x86, 0x4d,
+ 0xd4, 0x81, 0x45, 0xc4, 0xf3, 0x81, 0x57, 0x15, 0x9b, 0x62, 0x1c, 0x7b, 0x1c, 0xa8, 0x06, 0x93,
+ 0xb6, 0xba, 0x4d, 0x3a, 0x83, 0x2e, 0xe9, 0x30, 0x8f, 0x14, 0xfd, 0xa7, 0xfc, 0x0d, 0x97, 0x80,
+ 0x7d, 0x1e, 0xd4, 0x00, 0xe0, 0xbd, 0x58, 0xcb, 0xec, 0xb8, 0xe1, 0xe6, 0x7e, 0xff, 0x06, 0x4d,
+ 0x8f, 0xf2, 0x6c, 0x7f, 0x71, 0xc6, 0xff, 0xc7, 0xfc, 0x1f, 0x90, 0x93, 0xff, 0x9d, 0x83, 0x52,
+ 0xb0, 0x11, 0x4b, 0xf0, 0x85, 0xc9, 0x3b, 0x30, 0xa5, 0x18, 0x86, 0xe9, 0x28, 0xbc, 0x5b, 0xce,
+ 0x24, 0xbe, 0x15, 0x0e, 0xea, 0xa9, 0xde, 0xf6, 0x21, 0xf8, 0xad, 0xb0, 0x97, 0x11, 0x02, 0x14,
+ 0x1c, 0xd4, 0x84, 0x6e, 0x8b, 0x16, 0x39, 0x9b, 0xbc, 0x45, 0x2e, 0x46, 0xda, 0xe3, 0x1a, 0x4c,
+ 0x7a, 0x9d, 0xa4, 0xf8, 0x78, 0xc9, 0xb3, 0xb2, 0xbf, 0xb5, 0x7d, 0x1e, 0x54, 0x0d, 0x05, 0x43,
+ 0x9e, 0x05, 0xc3, 0xcc, 0x21, 0x57, 0x1d, 0xd1, 0xfe, 0xbb, 0x30, 0xee, 0xfe, 0x7b, 0x62, 0x2c,
+ 0xfd, 0x77, 0xe5, 0xeb, 0x70, 0x3a, 0xea, 0xc1, 0x54, 0xef, 0xd2, 0xeb, 0x80, 0x86, 0xf5, 0x1f,
+ 0xd5, 0xc2, 0x0d, 0x4b, 0xf8, 0xf9, 0xac, 0x7e, 0xef, 0xc9, 0xd3, 0x85, 0x53, 0x9f, 0x3c, 0x5d,
+ 0x38, 0xf5, 0xd9, 0xd3, 0x85, 0x53, 0x3f, 0x39, 0x58, 0x90, 0x9e, 0x1c, 0x2c, 0x48, 0x9f, 0x1c,
+ 0x2c, 0x48, 0x9f, 0x1d, 0x2c, 0x48, 0x9f, 0x1f, 0x2c, 0x48, 0xbf, 0xf8, 0xc7, 0xc2, 0xa9, 0x87,
+ 0x17, 0x8f, 0xfc, 0x06, 0xff, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xa8, 0x38, 0xe0, 0xa7,
+ 0x2f, 0x00, 0x00,
+}
+
+func (m *DockerImageReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DockerImageReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Tag)
+ copy(dAtA[i:], m.Tag)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Registry)
+ copy(dAtA[i:], m.Registry)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Image) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Image) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.DockerImageManifests) > 0 {
+ for iNdEx := len(m.DockerImageManifests) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DockerImageManifests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ i -= len(m.DockerImageConfig)
+ copy(dAtA[i:], m.DockerImageConfig)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig)))
+ i--
+ dAtA[i] = 0x52
+ i -= len(m.DockerImageManifestMediaType)
+ copy(dAtA[i:], m.DockerImageManifestMediaType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType)))
+ i--
+ dAtA[i] = 0x4a
+ if len(m.DockerImageSignatures) > 0 {
+ for iNdEx := len(m.DockerImageSignatures) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.DockerImageSignatures[iNdEx])
+ copy(dAtA[i:], m.DockerImageSignatures[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageSignatures[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if len(m.Signatures) > 0 {
+ for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.DockerImageLayers) > 0 {
+ for iNdEx := len(m.DockerImageLayers) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DockerImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i -= len(m.DockerImageManifest)
+ copy(dAtA[i:], m.DockerImageManifest)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifest)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.DockerImageMetadataVersion)
+ copy(dAtA[i:], m.DockerImageMetadataVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.DockerImageMetadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.DockerImageReference)
+ copy(dAtA[i:], m.DockerImageReference)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageBlobReferences) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Manifests) > 0 {
+ for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Manifests[iNdEx])
+ copy(dAtA[i:], m.Manifests[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifests[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i--
+ if m.ImageMissing {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ if m.Config != nil {
+ i -= len(*m.Config)
+ copy(dAtA[i:], *m.Config)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Layers) > 0 {
+ for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Layers[iNdEx])
+ copy(dAtA[i:], m.Layers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Layers[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ i--
+ if m.IncludeManifest {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ {
+ size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.To != nil {
+ {
+ size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Manifests) > 0 {
+ for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Manifests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.Tag)
+ copy(dAtA[i:], m.Tag)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+ i--
+ dAtA[i] = 0x1a
+ if m.Image != nil {
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageLayer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0x1a
+ i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageLayerData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLayerData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0x12
+ if m.LayerSize != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageLookupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Local {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageManifest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageManifest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageManifest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Variant)
+ copy(dAtA[i:], m.Variant)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Variant)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.OS)
+ copy(dAtA[i:], m.OS)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.OS)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Architecture)
+ copy(dAtA[i:], m.Architecture)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture)))
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ManifestSize))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageSignature) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.IssuedTo != nil {
+ {
+ size, err := m.IssuedTo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.IssuedBy != nil {
+ {
+ size, err := m.IssuedBy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Created != nil {
+ {
+ size, err := m.Created.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.SignedClaims) > 0 {
+ keysForSignedClaims := make([]string, 0, len(m.SignedClaims))
+ for k := range m.SignedClaims {
+ keysForSignedClaims = append(keysForSignedClaims, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims)
+ for iNdEx := len(keysForSignedClaims) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.SignedClaims[string(keysForSignedClaims[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForSignedClaims[iNdEx])
+ copy(dAtA[i:], keysForSignedClaims[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSignedClaims[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i -= len(m.ImageIdentity)
+ copy(dAtA[i:], m.ImageIdentity)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.Content != nil {
+ i -= len(m.Content)
+ copy(dAtA[i:], m.Content)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStream) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStream) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImport) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Images) > 0 {
+ for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Repository != nil {
+ {
+ size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i--
+ if m.Import {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Images) > 0 {
+ for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Repository != nil {
+ {
+ size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Import != nil {
+ {
+ size, err := m.Import.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamLayers) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Images) > 0 {
+ keysForImages := make([]string, 0, len(m.Images))
+ for k := range m.Images {
+ keysForImages = append(keysForImages, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForImages)
+ for iNdEx := len(keysForImages) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Images[string(keysForImages[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForImages[iNdEx])
+ copy(dAtA[i:], keysForImages[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForImages[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Blobs) > 0 {
+ keysForBlobs := make([]string, 0, len(m.Blobs))
+ for k := range m.Blobs {
+ keysForBlobs = append(keysForBlobs, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs)
+ for iNdEx := len(keysForBlobs) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Blobs[string(keysForBlobs[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForBlobs[iNdEx])
+ copy(dAtA[i:], keysForBlobs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBlobs[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Tag)
+ copy(dAtA[i:], m.Tag)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Tags) > 0 {
+ for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.DockerImageRepository)
+ copy(dAtA[i:], m.DockerImageRepository)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.PublicDockerImageRepository)
+ copy(dAtA[i:], m.PublicDockerImageRepository)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Tags) > 0 {
+ for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.DockerImageRepository)
+ copy(dAtA[i:], m.DockerImageRepository)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x18
+ if m.Tag != nil {
+ {
+ size, err := m.Tag.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageStreamTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageTag) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageTag) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Image != nil {
+ {
+ size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Status != nil {
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Spec != nil {
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ImageTagList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ImageTagList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedTagEventList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Tag)
+ copy(dAtA[i:], m.Tag)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ i--
+ if m.IncludeManifest {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ {
+ size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RepositoryImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.AdditionalTags) > 0 {
+ for iNdEx := len(m.AdditionalTags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AdditionalTags[iNdEx])
+ copy(dAtA[i:], m.AdditionalTags[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalTags[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Images) > 0 {
+ for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureGenericEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.CommonName)
+ copy(dAtA[i:], m.CommonName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Organization)
+ copy(dAtA[i:], m.Organization)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureIssuer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureSubject) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.PublicKeyID)
+ copy(dAtA[i:], m.PublicKeyID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TagEvent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x20
+ i -= len(m.Image)
+ copy(dAtA[i:], m.Image)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.DockerImageReference)
+ copy(dAtA[i:], m.DockerImageReference)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Created.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TagEventCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagEventCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x30
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagImportPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.ImportMode)
+ copy(dAtA[i:], m.ImportMode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImportMode)))
+ i--
+ dAtA[i] = 0x1a
+ i--
+ if m.Scheduled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i--
+ if m.Insecure {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *TagReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if m.Generation != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation))
+ i--
+ dAtA[i] = 0x28
+ }
+ i--
+ if m.Reference {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ if m.From != nil {
+ {
+ size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Annotations) > 0 {
+ keysForAnnotations := make([]string, 0, len(m.Annotations))
+ for k := range m.Annotations {
+ keysForAnnotations = append(keysForAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Annotations[string(keysForAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *DockerImageReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Registry)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Tag)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Image) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DockerImageReference)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.DockerImageMetadata.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DockerImageMetadataVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DockerImageManifest)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.DockerImageLayers) > 0 {
+ for _, e := range m.DockerImageLayers {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Signatures) > 0 {
+ for _, e := range m.Signatures {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.DockerImageSignatures) > 0 {
+ for _, b := range m.DockerImageSignatures {
+ l = len(b)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.DockerImageManifestMediaType)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DockerImageConfig)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.DockerImageManifests) > 0 {
+ for _, e := range m.DockerImageManifests {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageBlobReferences) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Layers) > 0 {
+ for _, s := range m.Layers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.Config != nil {
+ l = len(*m.Config)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if len(m.Manifests) > 0 {
+ for _, s := range m.Manifests {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageImportSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.To != nil {
+ l = m.To.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.ImportPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = m.ReferencePolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageImportStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Image != nil {
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Tag)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Manifests) > 0 {
+ for _, e := range m.Manifests {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageLayer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.LayerSize))
+ l = len(m.MediaType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageLayerData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LayerSize != nil {
+ n += 1 + sovGenerated(uint64(*m.LayerSize))
+ }
+ l = len(m.MediaType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageLookupPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ return n
+}
+
+func (m *ImageManifest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Digest)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.MediaType)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ManifestSize))
+ l = len(m.Architecture)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.OS)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Variant)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageSignature) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Content != nil {
+ l = len(m.Content)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ImageIdentity)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.SignedClaims) > 0 {
+ for k, v := range m.SignedClaims {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Created != nil {
+ l = m.Created.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.IssuedBy != nil {
+ l = m.IssuedBy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.IssuedTo != nil {
+ l = m.IssuedTo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ImageStream) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamImage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamImport) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamImportSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ if m.Repository != nil {
+ l = m.Repository.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Images) > 0 {
+ for _, e := range m.Images {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageStreamImportStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Import != nil {
+ l = m.Import.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Repository != nil {
+ l = m.Repository.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Images) > 0 {
+ for _, e := range m.Images {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageStreamLayers) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Blobs) > 0 {
+ for k, v := range m.Blobs {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Images) > 0 {
+ for k, v := range m.Images {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ImageStreamList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageStreamMapping) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Tag)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.DockerImageRepository)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Tags) > 0 {
+ for _, e := range m.Tags {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.LookupPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.DockerImageRepository)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Tags) > 0 {
+ for _, e := range m.Tags {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.PublicDockerImageRepository)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamTag) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Tag != nil {
+ l = m.Tag.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.Generation))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LookupPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ImageStreamTagList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ImageTag) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Spec != nil {
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Status != nil {
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Image != nil {
+ l = m.Image.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ImageTagList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NamedTagEventList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Tag)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RepositoryImportSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ImportPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ l = m.ReferencePolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RepositoryImportStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Images) > 0 {
+ for _, e := range m.Images {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.AdditionalTags) > 0 {
+ for _, s := range m.AdditionalTags {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SecretList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *SignatureCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastProbeTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SignatureGenericEntity) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Organization)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CommonName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SignatureIssuer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.SignatureGenericEntity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SignatureSubject) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.SignatureGenericEntity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PublicKeyID)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TagEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Created.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DockerImageReference)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Image)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Generation))
+ return n
+}
+
+func (m *TagEventCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Generation))
+ return n
+}
+
+func (m *TagImportPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 2
+ n += 2
+ l = len(m.ImportMode)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TagReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.From != nil {
+ l = m.From.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 2
+ if m.Generation != nil {
+ n += 1 + sovGenerated(uint64(*m.Generation))
+ }
+ l = m.ImportPolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ReferencePolicy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TagReferencePolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *DockerImageReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&DockerImageReference{`,
+ `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`,
+ `ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Image) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForDockerImageLayers := "[]ImageLayer{"
+ for _, f := range this.DockerImageLayers {
+ repeatedStringForDockerImageLayers += strings.Replace(strings.Replace(f.String(), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDockerImageLayers += "}"
+ repeatedStringForSignatures := "[]ImageSignature{"
+ for _, f := range this.Signatures {
+ repeatedStringForSignatures += strings.Replace(strings.Replace(f.String(), "ImageSignature", "ImageSignature", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSignatures += "}"
+ repeatedStringForDockerImageManifests := "[]ImageManifest{"
+ for _, f := range this.DockerImageManifests {
+ repeatedStringForDockerImageManifests += strings.Replace(strings.Replace(f.String(), "ImageManifest", "ImageManifest", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForDockerImageManifests += "}"
+ s := strings.Join([]string{`&Image{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`,
+ `DockerImageMetadata:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageMetadata), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`,
+ `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`,
+ `DockerImageLayers:` + repeatedStringForDockerImageLayers + `,`,
+ `Signatures:` + repeatedStringForSignatures + `,`,
+ `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`,
+ `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`,
+ `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`,
+ `DockerImageManifests:` + repeatedStringForDockerImageManifests + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageBlobReferences) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageBlobReferences{`,
+ `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`,
+ `Config:` + valueToStringGenerated(this.Config) + `,`,
+ `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`,
+ `Manifests:` + fmt.Sprintf("%v", this.Manifests) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageImportSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageImportSpec{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`,
+ `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`,
+ `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageImportStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForManifests := "[]Image{"
+ for _, f := range this.Manifests {
+ repeatedStringForManifests += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForManifests += "}"
+ s := strings.Join([]string{`&ImageImportStatus{`,
+ `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`,
+ `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`,
+ `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`,
+ `Manifests:` + repeatedStringForManifests + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageLayer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageLayer{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`,
+ `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageLayerData) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageLayerData{`,
+ `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`,
+ `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Image{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ImageList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageLookupPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageLookupPolicy{`,
+ `Local:` + fmt.Sprintf("%v", this.Local) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageManifest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageManifest{`,
+ `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+ `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
+ `ManifestSize:` + fmt.Sprintf("%v", this.ManifestSize) + `,`,
+ `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
+ `OS:` + fmt.Sprintf("%v", this.OS) + `,`,
+ `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageSignature) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]SignatureCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ keysForSignedClaims := make([]string, 0, len(this.SignedClaims))
+ for k := range this.SignedClaims {
+ keysForSignedClaims = append(keysForSignedClaims, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims)
+ mapStringForSignedClaims := "map[string]string{"
+ for _, k := range keysForSignedClaims {
+ mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k])
+ }
+ mapStringForSignedClaims += "}"
+ s := strings.Join([]string{`&ImageSignature{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Content:` + valueToStringGenerated(this.Content) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`,
+ `SignedClaims:` + mapStringForSignedClaims + `,`,
+ `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1) + `,`,
+ `IssuedBy:` + strings.Replace(this.IssuedBy.String(), "SignatureIssuer", "SignatureIssuer", 1) + `,`,
+ `IssuedTo:` + strings.Replace(this.IssuedTo.String(), "SignatureSubject", "SignatureSubject", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStream) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageStream{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamImage) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageStreamImage{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamImport) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageStreamImport{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamImportSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImages := "[]ImageImportSpec{"
+ for _, f := range this.Images {
+ repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImages += "}"
+ s := strings.Join([]string{`&ImageStreamImportSpec{`,
+ `Import:` + fmt.Sprintf("%v", this.Import) + `,`,
+ `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportSpec", "RepositoryImportSpec", 1) + `,`,
+ `Images:` + repeatedStringForImages + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamImportStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImages := "[]ImageImportStatus{"
+ for _, f := range this.Images {
+ repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImages += "}"
+ s := strings.Join([]string{`&ImageStreamImportStatus{`,
+ `Import:` + strings.Replace(this.Import.String(), "ImageStream", "ImageStream", 1) + `,`,
+ `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`,
+ `Images:` + repeatedStringForImages + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamLayers) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForBlobs := make([]string, 0, len(this.Blobs))
+ for k := range this.Blobs {
+ keysForBlobs = append(keysForBlobs, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs)
+ mapStringForBlobs := "map[string]ImageLayerData{"
+ for _, k := range keysForBlobs {
+ mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k])
+ }
+ mapStringForBlobs += "}"
+ keysForImages := make([]string, 0, len(this.Images))
+ for k := range this.Images {
+ keysForImages = append(keysForImages, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForImages)
+ mapStringForImages := "map[string]ImageBlobReferences{"
+ for _, k := range keysForImages {
+ mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k])
+ }
+ mapStringForImages += "}"
+ s := strings.Join([]string{`&ImageStreamLayers{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Blobs:` + mapStringForBlobs + `,`,
+ `Images:` + mapStringForImages + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ImageStream{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStream", "ImageStream", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ImageStreamList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamMapping) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageStreamMapping{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+ `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTags := "[]TagReference{"
+ for _, f := range this.Tags {
+ repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "TagReference", "TagReference", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTags += "}"
+ s := strings.Join([]string{`&ImageStreamSpec{`,
+ `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`,
+ `Tags:` + repeatedStringForTags + `,`,
+ `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTags := "[]NamedTagEventList{"
+ for _, f := range this.Tags {
+ repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTags += "}"
+ s := strings.Join([]string{`&ImageStreamStatus{`,
+ `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`,
+ `Tags:` + repeatedStringForTags + `,`,
+ `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamTag) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]TagEventCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ImageStreamTag{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Tag:` + strings.Replace(this.Tag.String(), "TagReference", "TagReference", 1) + `,`,
+ `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+ `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageStreamTagList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ImageStreamTag{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStreamTag", "ImageStreamTag", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ImageStreamTagList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageTag) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ImageTag{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(this.Spec.String(), "TagReference", "TagReference", 1) + `,`,
+ `Status:` + strings.Replace(this.Status.String(), "NamedTagEventList", "NamedTagEventList", 1) + `,`,
+ `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ImageTagList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ImageTag{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageTag", "ImageTag", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ImageTagList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedTagEventList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]TagEvent{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TagEvent", "TagEvent", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ repeatedStringForConditions := "[]TagEventCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&NamedTagEventList{`,
+ `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RepositoryImportSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RepositoryImportSpec{`,
+ `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`,
+ `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`,
+ `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RepositoryImportStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForImages := "[]ImageImportStatus{"
+ for _, f := range this.Images {
+ repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForImages += "}"
+ s := strings.Join([]string{`&RepositoryImportStatus{`,
+ `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`,
+ `Images:` + repeatedStringForImages + `,`,
+ `AdditionalTags:` + fmt.Sprintf("%v", this.AdditionalTags) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SecretList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Secret{"
+ for _, f := range this.Items {
+ repeatedStringForItems += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&SecretList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SignatureCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SignatureCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SignatureGenericEntity) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SignatureGenericEntity{`,
+ `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`,
+ `CommonName:` + fmt.Sprintf("%v", this.CommonName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SignatureIssuer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SignatureIssuer{`,
+ `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SignatureSubject) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SignatureSubject{`,
+ `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`,
+ `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagEvent) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TagEvent{`,
+ `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`,
+ `Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+ `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagEventCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TagEventCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagImportPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TagImportPolicy{`,
+ `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`,
+ `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`,
+ `ImportMode:` + fmt.Sprintf("%v", this.ImportMode) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAnnotations := make([]string, 0, len(this.Annotations))
+ for k := range this.Annotations {
+ keysForAnnotations = append(keysForAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ mapStringForAnnotations := "map[string]string{"
+ for _, k := range keysForAnnotations {
+ mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+ }
+ mapStringForAnnotations += "}"
+ s := strings.Join([]string{`&TagReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Annotations:` + mapStringForAnnotations + `,`,
+ `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`,
+ `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`,
+ `Generation:` + valueToStringGenerated(this.Generation) + `,`,
+ `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`,
+ `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TagReferencePolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TagReferencePolicy{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *DockerImageReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Registry = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tag = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Image) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Image: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageReference = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageManifest = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{})
+ if err := m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signatures = append(m.Signatures, ImageSignature{})
+ if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx))
+ copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageConfig", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageConfig = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageManifests = append(m.DockerImageManifests, ImageManifest{})
+ if err := m.DockerImageManifests[len(m.DockerImageManifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Config = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ImageMissing = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Manifests = append(m.Manifests, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageImportSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.To == nil {
+ m.To = &v11.LocalObjectReference{}
+ }
+ if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeManifest = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageImportStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Image == nil {
+ m.Image = &Image{}
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tag = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Manifests = append(m.Manifests, Image{})
+ if err := m.Manifests[len(m.Manifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageLayer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType)
+ }
+ m.LayerSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LayerSize |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MediaType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageLayerData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LayerSize = &v
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MediaType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Image{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Local = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageManifest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageManifest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageManifest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Digest = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MediaType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManifestSize", wireType)
+ }
+ m.ManifestSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ManifestSize |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architecture = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.OS = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Variant = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageSignature) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...)
+ if m.Content == nil {
+ m.Content = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, SignatureCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImageIdentity = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignedClaims == nil {
+ m.SignedClaims = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.SignedClaims[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Created == nil {
+ m.Created = &v1.Time{}
+ }
+ if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IssuedBy == nil {
+ m.IssuedBy = &SignatureIssuer{}
+ }
+ if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IssuedTo == nil {
+ m.IssuedTo = &SignatureSubject{}
+ }
+ if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStream) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStream: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamImage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamImport) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamImport: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Import = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Repository == nil {
+ m.Repository = &RepositoryImportSpec{}
+ }
+ if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ImageImportSpec{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Import == nil {
+ m.Import = &ImageStream{}
+ }
+ if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Repository == nil {
+ m.Repository = &RepositoryImportStatus{}
+ }
+ if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ImageImportStatus{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Blobs == nil {
+ m.Blobs = make(map[string]ImageLayerData)
+ }
+ var mapkey string
+ mapvalue := &ImageLayerData{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &ImageLayerData{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Blobs[mapkey] = *mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Images == nil {
+ m.Images = make(map[string]ImageBlobReferences)
+ }
+ var mapkey string
+ mapvalue := &ImageBlobReferences{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &ImageBlobReferences{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Images[mapkey] = *mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ImageStream{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tag = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageRepository = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, TagReference{})
+ if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageRepository = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tags = append(m.Tags, NamedTagEventList{})
+ if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamTag) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamTag: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Tag == nil {
+ m.Tag = &TagReference{}
+ }
+ if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Generation |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, TagEventCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageStreamTagList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ImageStreamTag{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageTag) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageTag: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageTag: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Spec == nil {
+ m.Spec = &TagReference{}
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Status == nil {
+ m.Status = &NamedTagEventList{}
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Image == nil {
+ m.Image = &Image{}
+ }
+ if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ImageTagList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ImageTagList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ImageTagList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ImageTag{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedTagEventList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedTagEventList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Tag = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, TagEvent{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, TagEventCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IncludeManifest = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ImageImportStatus{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SecretList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SecretList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, v11.Secret{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = SignatureConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureGenericEntity) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Organization = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CommonName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureIssuer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureSubject) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PublicKeyID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagEvent) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagEvent: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DockerImageReference = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Image = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Generation |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagEventCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = TagEventConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Generation |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagImportPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagImportPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Insecure = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Scheduled = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImportMode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImportMode = ImportModeType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.From == nil {
+ m.From = &v11.ObjectReference{}
+ }
+ if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Reference = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Generation = &v
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto
new file mode 100644
index 0000000000..0b7ae71822
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/generated.proto
@@ -0,0 +1,746 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.image.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/image/v1";
+
+// DockerImageReference points to a container image.
+message DockerImageReference {
+ // Registry is the registry that contains the container image
+ optional string registry = 1;
+
+ // Namespace is the namespace that contains the container image
+ optional string namespace = 2;
+
+ // Name is the name of the container image
+ optional string name = 3;
+
+ // Tag is which tag of the container image is being referenced
+ optional string tag = 4;
+
+ // ID is the identifier for the container image
+ optional string iD = 5;
+}
+
+// Image is an immutable representation of a container image and metadata at a point in time.
+// Images are named by taking a hash of their contents (metadata and content) and any change
+// in format, content, or metadata results in a new name. The images resource is primarily
+// for use by cluster administrators and integrations like the cluster image registry - end
+// users instead access images via the imagestreamtags or imagestreamimages resources. While
+// image metadata is stored in the API, any integration that implements the container image
+// registry API must provide its own storage for the raw manifest data, image config, and
+// layer contents.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Image {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // DockerImageReference is the string that can be used to pull this image.
+ optional string dockerImageReference = 2;
+
+ // DockerImageMetadata contains metadata about this image
+ // +patchStrategy=replace
+ // +kubebuilder:pruning:PreserveUnknownFields
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3;
+
+ // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0"
+ optional string dockerImageMetadataVersion = 4;
+
+ // DockerImageManifest is the raw JSON of the manifest
+ optional string dockerImageManifest = 5;
+
+ // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.
+ repeated ImageLayer dockerImageLayers = 6;
+
+ // Signatures holds all signatures of the image.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated ImageSignature signatures = 7;
+
+ // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
+ repeated bytes dockerImageSignatures = 8;
+
+ // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
+ optional string dockerImageManifestMediaType = 9;
+
+ // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
+ // Will not be set when the image represents a manifest list.
+ optional string dockerImageConfig = 10;
+
+ // DockerImageManifests holds information about sub-manifests when the image represents a manifest list.
+ // When this field is present, no DockerImageLayers should be specified.
+ repeated ImageManifest dockerImageManifests = 11;
+}
+
+// ImageBlobReferences describes the blob references within an image.
+message ImageBlobReferences {
+ // imageMissing is true if the image is referenced by the image stream but the image
+ // object has been deleted from the API by an administrator. When this field is set,
+ // layers and config fields may be empty and callers that depend on the image metadata
+ // should consider the image to be unavailable for download or viewing.
+ // +optional
+ optional bool imageMissing = 3;
+
+ // layers is the list of blobs that compose this image, from base layer to top layer.
+ // All layers referenced by this array will be defined in the blobs map. Some images
+ // may have zero layers.
+ // +optional
+ repeated string layers = 1;
+
+ // config, if set, is the blob that contains the image config. Some images do
+ // not have separate config blobs and this field will be set to nil if so.
+ // +optional
+ optional string config = 2;
+
+ // manifests is the list of other image names that this image points
+ // to. For a single architecture image, it is empty. For a multi-arch
+ // image, it consists of the digests of single architecture images,
+ // such images shouldn't have layers nor config.
+ // +optional
+ repeated string manifests = 4;
+}
+
+// ImageImportSpec describes a request to import a specific image.
+message ImageImportSpec {
+ // From is the source of an image to import; only kind DockerImage is allowed
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used
+ optional k8s.io.api.core.v1.LocalObjectReference to = 2;
+
+ // ImportPolicy is the policy controlling how the image is imported
+ optional TagImportPolicy importPolicy = 3;
+
+ // ReferencePolicy defines how other components should consume the image
+ optional TagReferencePolicy referencePolicy = 5;
+
+ // IncludeManifest determines if the manifest for each image is returned in the response
+ optional bool includeManifest = 4;
+}
+
+// ImageImportStatus describes the result of an image import.
+message ImageImportStatus {
+ // Status is the status of the image import, including errors encountered while retrieving the image
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1;
+
+ // Image is the metadata of that image, if the image was located
+ optional Image image = 2;
+
+ // Tag is the tag this image was located under, if any
+ optional string tag = 3;
+
+ // Manifests holds sub-manifests metadata when importing a manifest list
+ repeated Image manifests = 4;
+}
+
+// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
+message ImageLayer {
+ // Name of the layer as defined by the underlying store.
+ optional string name = 1;
+
+ // Size of the layer in bytes as defined by the underlying store.
+ optional int64 size = 2;
+
+ // MediaType of the referenced object.
+ optional string mediaType = 3;
+}
+
+// ImageLayerData contains metadata about an image layer.
+message ImageLayerData {
+ // Size of the layer in bytes as defined by the underlying store. This field is
+ // optional if the necessary information about size is not available.
+ optional int64 size = 1;
+
+ // MediaType of the referenced object.
+ optional string mediaType = 2;
+}
+
+// ImageList is a list of Image objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of images
+ repeated Image items = 2;
+}
+
+// ImageLookupPolicy describes how an image stream can be used to override the image references
+// used by pods, builds, and other resources in a namespace.
+message ImageLookupPolicy {
+ // local will change the docker short image references (like "mysql" or
+ // "php:latest") on objects in this namespace to the image ID whenever they match
+ // this image stream, instead of reaching out to a remote registry. The name will
+ // be fully qualified to an image ID if found. The tag's referencePolicy is taken
+ // into account on the replaced value. Only works within the current namespace.
+ optional bool local = 3;
+}
+
+// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular
+// Image object.
+message ImageManifest {
+ // Digest is the unique identifier for the manifest. It refers to an Image object.
+ optional string digest = 1;
+
+ // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
+ // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.
+ optional string mediaType = 2;
+
+ // ManifestSize represents the size of the raw object contents, in bytes.
+ optional int64 manifestSize = 3;
+
+ // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
+ optional string architecture = 4;
+
+ // OS specifies the operating system, for example `linux`.
+ optional string os = 5;
+
+ // Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU
+ // variant of the ARM CPU.
+ optional string variant = 6;
+}
+
+// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims
+// as long as the signature is trusted. Based on this information it is possible to restrict runnable images
+// to those matching cluster-wide policy.
+// Mandatory fields should be parsed by clients doing image verification. The others are parsed from
+// signature's content by the server. They serve just an informative purpose.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageSignature {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Required: Describes a type of stored blob.
+ optional string type = 2;
+
+ // Required: An opaque binary string which is an image's signature.
+ optional bytes content = 3;
+
+ // Conditions represent the latest available observations of a signature's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated SignatureCondition conditions = 4;
+
+ // A human readable string representing image's identity. It could be a product name and version, or an
+ // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2").
+ optional string imageIdentity = 5;
+
+ // Contains claims from the signature.
+ map signedClaims = 6;
+
+ // If specified, it is the time of signature's creation.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7;
+
+ // If specified, it holds information about an issuer of signing certificate or key (a person or entity
+ // who signed the signing certificate or key).
+ optional SignatureIssuer issuedBy = 8;
+
+ // If specified, it holds information about a subject of signing certificate or key (a person or entity
+ // who signed the image).
+ optional SignatureSubject issuedTo = 9;
+}
+
+// An ImageStream stores a mapping of tags to images, metadata overrides that are applied
+// when images are tagged in a stream, and an optional reference to a container image
+// repository on a registry. Users typically update the spec.tags field to point to external
+// images which are imported from container registries using credentials in your namespace
+// with the pull secret type, or to existing image stream tags and images which are
+// immediately accessible for tagging or pulling. The history of images applied to a tag
+// is visible in the status.tags field and any user who can view an image stream is allowed
+// to tag that image into their own image streams. Access to pull images from the integrated
+// registry is granted by having the "get imagestreams/layers" permission on a given image
+// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both
+// spec and status for that tag to be removed. Image stream history is retained until an
+// administrator runs the prune operation, which removes references that are no longer in
+// use. To preserve a historical image, ensure there is a tag in spec pointing to that image
+// by its digest.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStream {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec describes the desired state of this stream
+ // +optional
+ optional ImageStreamSpec spec = 2;
+
+ // Status describes the current state of this stream
+ // +optional
+ optional ImageStreamStatus status = 3;
+}
+
+// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.
+// User interfaces and regular users can use this resource to access the metadata details of
+// a tagged image in the image stream history for viewing, since Image resources are not
+// directly accessible to end users. A not found error will be returned if no such image is
+// referenced by a tag within the ImageStream. Images are created when spec tags are set on
+// an image stream that represent an image in an external registry, when pushing to the
+// integrated registry, or when tagging an existing image from one image stream to another.
+// The name of an image stream image is in the form "<name>@<id>", where the digest is
+// the content addressible identifier for the image (sha256:xxxxx...). You can use
+// ImageStreamImages as the from.kind of an image stream spec tag to reference an image
+// exactly. The only operations supported on the imagestreamimage endpoint are retrieving
+// the image.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamImage {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Image associated with the ImageStream and image name.
+ optional Image image = 2;
+}
+
+// The image stream import resource provides an easy way for a user to find and import container images
+// from other container image registries into the server. Individual images or an entire image repository may
+// be imported, and users may choose to see the results of the import prior to tagging the resulting
+// images into the specified image stream.
+//
+// This API is intended for end-user tools that need to see the metadata of the image prior to import
+// (for instance, to generate an application from it). Clients that know the desired image can continue
+// to create spec.tags directly into their image streams.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamImport {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec is a description of the images that the user wishes to import
+ optional ImageStreamImportSpec spec = 2;
+
+ // Status is the result of importing the image
+ optional ImageStreamImportStatus status = 3;
+}
+
+// ImageStreamImportSpec defines what images should be imported.
+message ImageStreamImportSpec {
+ // Import indicates whether to perform an import - if so, the specified tags are set on the spec
+ // and status of the image stream defined by the type meta.
+ optional bool import = 1;
+
+ // Repository is an optional import of an entire container image repository. A maximum limit on the
+ // number of tags imported this way is imposed by the server.
+ optional RepositoryImportSpec repository = 2;
+
+ // Images are a list of individual images to import.
+ repeated ImageImportSpec images = 3;
+}
+
+// ImageStreamImportStatus contains information about the status of an image stream import.
+message ImageStreamImportStatus {
+ // Import is the image stream that was successfully updated or created when 'to' was set.
+ optional ImageStream import = 1;
+
+ // Repository is set if spec.repository was set to the outcome of the import
+ optional RepositoryImportStatus repository = 2;
+
+ // Images is set with the result of importing spec.images
+ repeated ImageImportStatus images = 3;
+}
+
+// ImageStreamLayers describes information about the layers referenced by images in this
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamLayers {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // blobs is a map of blob name to metadata about the blob.
+ map<string, ImageLayerData> blobs = 2;
+
+ // images is a map between an image name and the names of the blobs and config that
+ // comprise the image.
+ map<string, ImageBlobReferences> images = 3;
+}
+
+// ImageStreamList is a list of ImageStream objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of imageStreams
+ repeated ImageStream items = 2;
+}
+
+// ImageStreamMapping represents a mapping from a single image stream tag to a container
+// image as well as the reference to the container image stream the image came from. This
+// resource is used by privileged integrators to create an image resource and to associate
+// it with an image stream in the status tags field. Creating an ImageStreamMapping will
+// allow any user who can view the image stream to tag or pull that image, so only create
+// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create and the metadata name and
+// namespace should be set to the image stream containing the tag that should be updated.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamMapping {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Image is a container image.
+ optional Image image = 2;
+
+ // Tag is a string value this image can be located with inside the stream.
+ optional string tag = 3;
+}
+
+// ImageStreamSpec represents options for ImageStreams.
+message ImageStreamSpec {
+ // lookupPolicy controls how other resources reference images within this namespace.
+ optional ImageLookupPolicy lookupPolicy = 3;
+
+ // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server
+ // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
+ // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.
+ optional string dockerImageRepository = 1;
+
+ // tags map arbitrary string values to specific image locators
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ repeated TagReference tags = 2;
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+message ImageStreamStatus {
+ // DockerImageRepository represents the effective location this stream may be accessed at.
+ // May be empty until the server determines where the repository is located
+ optional string dockerImageRepository = 1;
+
+ // PublicDockerImageRepository represents the public location from where the image can
+ // be pulled outside the cluster. This field may be empty if the administrator
+ // has not exposed the integrated registry externally.
+ optional string publicDockerImageRepository = 3;
+
+ // Tags are a historical record of images associated with each tag. The first entry in the
+ // TagEvent array is the currently tagged image.
+ // +patchMergeKey=tag
+ // +patchStrategy=merge
+ repeated NamedTagEventList tags = 2;
+}
+
+// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
+// Use this resource to interact with the tags and images in an image stream by tag, or
+// to see the image details for a particular tag. The image associated with this resource
+// is the most recently successfully tagged, imported, or pushed image (as described in the
+// image stream status.tags.items list for this tag). If an import is in progress or has
+// failed the previous image will be shown. Deleting an image stream tag clears both the
+// status and spec fields of an image stream. If no image can be retrieved for a given tag,
+// a not found error will be returned.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamTag {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // tag is the spec tag associated with this image stream tag, and it may be null
+ // if only pushes have occurred to this image stream.
+ optional TagReference tag = 2;
+
+ // generation is the current generation of the tagged image - if tag is provided
+ // and this value is not equal to the tag generation, a user has requested an
+ // import that has not completed, or conditions will be filled out indicating any
+ // error.
+ optional int64 generation = 3;
+
+ // lookupPolicy indicates whether this tag will handle image references in this
+ // namespace.
+ optional ImageLookupPolicy lookupPolicy = 6;
+
+ // conditions is an array of conditions that apply to the image stream tag.
+ repeated TagEventCondition conditions = 4;
+
+ // image associated with the ImageStream and tag.
+ optional Image image = 5;
+}
+
+// ImageStreamTagList is a list of ImageStreamTag objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamTagList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of image stream tags
+ repeated ImageStreamTag items = 2;
+}
+
+// ImageTag represents a single tag within an image stream and includes the spec,
+// the status history, and the currently referenced image (if any) of the provided
+// tag. This type replaces the ImageStreamTag by providing a full view of the tag.
+// ImageTags are returned for every spec or status tag present on the image stream.
+// If no tag exists in either form a not found error will be returned by the API.
+// A create operation will succeed if no spec tag has already been defined and the
+// spec field is set. Delete will remove both spec and status elements from the
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageTag {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the spec tag associated with this image stream tag, and it may be null
+ // if only pushes have occurred to this image stream.
+ optional TagReference spec = 2;
+
+ // status is the status tag details associated with this image stream tag, and it
+ // may be null if no push or import has been performed.
+ optional NamedTagEventList status = 3;
+
+ // image is the details of the most recent image stream status tag, and it may be
+ // null if import has not completed or an administrator has deleted the image
+ // object. To verify this is the most recent image, you must verify the generation
+ // of the most recent status.items entry matches the spec tag (if a spec tag is
+ // set). This field will not be set when listing image tags.
+ optional Image image = 4;
+}
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageTagList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of image stream tags
+ repeated ImageTag items = 2;
+}
+
+// NamedTagEventList relates a tag to its image history.
+message NamedTagEventList {
+ // Tag is the tag for which the history is recorded
+ optional string tag = 1;
+
+ // Standard object's metadata.
+ repeated TagEvent items = 2;
+
+ // Conditions is an array of conditions that apply to the tag event list.
+ repeated TagEventCondition conditions = 3;
+}
+
+// RepositoryImportSpec describes a request to import images from a container image repository.
+message RepositoryImportSpec {
+ // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
+ optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+ // ImportPolicy is the policy controlling how the image is imported
+ optional TagImportPolicy importPolicy = 2;
+
+ // ReferencePolicy defines how other components should consume the image
+ optional TagReferencePolicy referencePolicy = 4;
+
+ // IncludeManifest determines if the manifest for each image is returned in the response
+ optional bool includeManifest = 3;
+}
+
+// RepositoryImportStatus describes the result of an image repository import
+message RepositoryImportStatus {
+ // Status reflects whether any failure occurred during import
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1;
+
+ // Images is a list of images successfully retrieved by the import of the repository.
+ repeated ImageImportStatus images = 2;
+
+ // AdditionalTags are tags that exist in the repository but were not imported because
+ // a maximum limit of automatic imports was applied.
+ repeated string additionalTags = 3;
+}
+
+// SecretList is a list of Secret.
+// +openshift:compatibility-gen:level=1
+message SecretList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of secret objects.
+ // More info: https://kubernetes.io/docs/concepts/configuration/secret
+ repeated k8s.io.api.core.v1.Secret items = 2;
+}
+
+// SignatureCondition describes an image signature condition of particular kind at particular probe time.
+message SignatureCondition {
+ // Type of signature condition, Complete or Failed.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition was checked.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+ // Last time the condition transit from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+ // (brief) reason for the condition's last transition.
+ optional string reason = 5;
+
+ // Human readable message indicating details about last transition.
+ optional string message = 6;
+}
+
+// SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject
+// of signing certificate or key.
+message SignatureGenericEntity {
+ // Organization name.
+ optional string organization = 1;
+
+ // Common name (e.g. openshift-signing-service).
+ optional string commonName = 2;
+}
+
+// SignatureIssuer holds information about an issuer of signing certificate or key.
+message SignatureIssuer {
+ optional SignatureGenericEntity signatureGenericEntity = 1;
+}
+
+// SignatureSubject holds information about a person or entity who created the signature.
+message SignatureSubject {
+ optional SignatureGenericEntity signatureGenericEntity = 1;
+
+ // If present, it is a human readable key id of public key belonging to the subject used to verify image
+ // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g.
+ // 0x685ebe62bf278440).
+ optional string publicKeyID = 2;
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+message TagEvent {
+ // Created holds the time the TagEvent was created
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1;
+
+ // DockerImageReference is the string that can be used to pull this image
+ optional string dockerImageReference = 2;
+
+ // Image is the image
+ optional string image = 3;
+
+ // Generation is the spec tag generation that resulted in this tag being updated
+ optional int64 generation = 4;
+}
+
+// TagEventCondition contains condition information for a tag event.
+message TagEventCondition {
+ // Type of tag event condition, currently only ImportSuccess
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // LastTransitionTIme is the time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Reason is a brief machine readable explanation for the condition's last transition.
+ optional string reason = 4;
+
+ // Message is a human readable description of the details about last transition, complementing reason.
+ optional string message = 5;
+
+ // Generation is the spec tag generation that this status corresponds to
+ optional int64 generation = 6;
+}
+
+// TagImportPolicy controls how images related to this tag will be imported.
+message TagImportPolicy {
+ // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+ optional bool insecure = 1;
+
+ // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
+ optional bool scheduled = 2;
+
+ // ImportMode describes how to import an image manifest.
+ optional string importMode = 3;
+}
+
+// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
+message TagReference {
+ // Name of the tag
+ optional string name = 1;
+
+ // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
+ // +optional
+ map<string, string> annotations = 2;
+
+ // Optional; if specified, a reference to another image that this tag should point to. Valid values
+ // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
+ // can only reference a tag within this same ImageStream.
+ optional k8s.io.api.core.v1.ObjectReference from = 3;
+
+ // Reference states if the tag will be imported. Default value is false, which means the tag will
+ // be imported.
+ optional bool reference = 4;
+
+ // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+ // is changed the generation is set to match the current stream generation (which is incremented every
+ // time spec is changed). Other processes in the system like the image importer observe that the
+ // generation of spec tag is newer than the generation recorded in the status and use that as a trigger
+ // to import the newest remote tag. To trigger a new import, clients may set this value to zero which
+ // will reset the generation to the latest stream generation. Legacy clients will send this value as
+ // nil which will be merged with the current tag generation.
+ // +optional
+ optional int64 generation = 5;
+
+ // ImportPolicy is information that controls how images may be imported by the server.
+ optional TagImportPolicy importPolicy = 6;
+
+ // ReferencePolicy defines how other components should consume the image.
+ optional TagReferencePolicy referencePolicy = 7;
+}
+
+// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
+// image change triggers in deployment configs or builds are resolved. This allows the image stream
+// author to control how images are accessed.
+message TagReferencePolicy {
+ // Type determines how the image pull spec should be transformed when the image stream tag is used in
+ // deployment config triggers or new builds. The default value is `Source`, indicating the original
+ // location of the image should be used (if imported). The user may also specify `Local`, indicating
+ // that the pull spec should point to the integrated container image registry and leverage the registry's
+ // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
+ // image to be managed from the image stream's namespace, so others on the platform can access a remote
+ // image but have no access to the remote secret. It also allows the image layers to be mirrored into
+ // the local registry which the images can still be pulled even if the upstream registry is unavailable.
+ optional string type = 1;
+}
+
diff --git a/vendor/github.com/openshift/api/image/v1/legacy.go b/vendor/github.com/openshift/api/image/v1/legacy.go
new file mode 100644
index 0000000000..02bbaa2906
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/legacy.go
@@ -0,0 +1,33 @@
+package v1
+
+import (
+ "github.com/openshift/api/image/docker10"
+ "github.com/openshift/api/image/dockerpre012"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &Image{},
+ &ImageList{},
+ &ImageSignature{},
+ &ImageStream{},
+ &ImageStreamList{},
+ &ImageStreamMapping{},
+ &ImageStreamTag{},
+ &ImageStreamTagList{},
+ &ImageStreamImage{},
+ &ImageStreamImport{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go
new file mode 100644
index 0000000000..0d924103a6
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/register.go
@@ -0,0 +1,54 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/openshift/api/image/docker10"
+ "github.com/openshift/api/image/dockerpre012"
+)
+
+var (
+ GroupName = "image.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Image{},
+ &ImageList{},
+ &ImageSignature{},
+ &ImageStream{},
+ &ImageStreamList{},
+ &ImageStreamMapping{},
+ &ImageStreamTag{},
+ &ImageStreamTagList{},
+ &ImageStreamImage{},
+ &ImageStreamLayers{},
+ &ImageStreamImport{},
+ &ImageTag{},
+ &ImageTagList{},
+ &SecretList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go
new file mode 100644
index 0000000000..9919c0fe76
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/types.go
@@ -0,0 +1,766 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageList is a list of Image objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of images
+ Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Image is an immutable representation of a container image and metadata at a point in time.
+// Images are named by taking a hash of their contents (metadata and content) and any change
+// in format, content, or metadata results in a new name. The images resource is primarily
+// for use by cluster administrators and integrations like the cluster image registry - end
+// users instead access images via the imagestreamtags or imagestreamimages resources. While
+// image metadata is stored in the API, any integration that implements the container image
+// registry API must provide its own storage for the raw manifest data, image config, and
+// layer contents.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Image struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // DockerImageReference is the string that can be used to pull this image.
+ DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"`
+ // DockerImageMetadata contains metadata about this image
+ // +patchStrategy=replace
+ // +kubebuilder:pruning:PreserveUnknownFields
+ DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"`
+ // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0"
+ DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"`
+ // DockerImageManifest is the raw JSON of the manifest
+ DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"`
+ // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.
+ DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"`
+ // Signatures holds all signatures of the image.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"`
+ // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.
+ DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"`
+ // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.
+ DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"`
+ // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.
+ // Will not be set when the image represents a manifest list.
+ DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"`
+ // DockerImageManifests holds information about sub-manifests when the image represents a manifest list.
+ // When this field is present, no DockerImageLayers should be specified.
+ DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"`
+}
+
+// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular
+// Image object.
+type ImageManifest struct {
+ // Digest is the unique identifier for the manifest. It refers to an Image object.
+ Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"`
+ // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
+ // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.
+ MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
+ // ManifestSize represents the size of the raw object contents, in bytes.
+ ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"`
+ // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
+ Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"`
+ // OS specifies the operating system, for example `linux`.
+ OS string `json:"os" protobuf:"bytes,5,opt,name=os"`
+	// Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU
+ // variant of the ARM CPU.
+ Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"`
+}
+
+// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
+type ImageLayer struct {
+ // Name of the layer as defined by the underlying store.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Size of the layer in bytes as defined by the underlying store.
+ LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"`
+ // MediaType of the referenced object.
+ MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=create,delete
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims
+// as long as the signature is trusted. Based on this information it is possible to restrict runnable images
+// to those matching cluster-wide policy.
+// Mandatory fields should be parsed by clients doing image verification. The others are parsed from
+// signature's content by the server. They serve just an informative purpose.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageSignature struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Required: Describes a type of stored blob.
+ Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+ // Required: An opaque binary string which is an image's signature.
+ Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"`
+ // Conditions represent the latest available observations of a signature's current state.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+
+ // Following metadata fields will be set by server if the signature content is successfully parsed and
+ // the information available.
+
+ // A human readable string representing image's identity. It could be a product name and version, or an
+ // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2").
+ ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"`
+ // Contains claims from the signature.
+ SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"`
+ // If specified, it is the time of signature's creation.
+ Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"`
+ // If specified, it holds information about an issuer of signing certificate or key (a person or entity
+ // who signed the signing certificate or key).
+ IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"`
+ // If specified, it holds information about a subject of signing certificate or key (a person or entity
+ // who signed the image).
+ IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"`
+}
+
+// SignatureConditionType is a type of image signature condition.
+type SignatureConditionType string
+
+// SignatureCondition describes an image signature condition of particular kind at particular probe time.
+type SignatureCondition struct {
+ // Type of signature condition, Complete or Failed.
+ Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition was checked.
+ LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject
+// of signing certificate or key.
+type SignatureGenericEntity struct {
+ // Organization name.
+ Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"`
+ // Common name (e.g. openshift-signing-service).
+ CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"`
+}
+
+// SignatureIssuer holds information about an issuer of signing certificate or key.
+type SignatureIssuer struct {
+ SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
+}
+
+// SignatureSubject holds information about a person or entity who created the signature.
+type SignatureSubject struct {
+ SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
+ // If present, it is a human readable key id of public key belonging to the subject used to verify image
+ // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g.
+ // 0x685ebe62bf278440).
+ PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamList is a list of ImageStream objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of imageStreams
+ Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=Secrets,verb=get,subresource=secrets,result=github.com/openshift/api/image/v1.SecretList
+// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// An ImageStream stores a mapping of tags to images, metadata overrides that are applied
+// when images are tagged in a stream, and an optional reference to a container image
+// repository on a registry. Users typically update the spec.tags field to point to external
+// images which are imported from container registries using credentials in your namespace
+// with the pull secret type, or to existing image stream tags and images which are
+// immediately accessible for tagging or pulling. The history of images applied to a tag
+// is visible in the status.tags field and any user who can view an image stream is allowed
+// to tag that image into their own image streams. Access to pull images from the integrated
+// registry is granted by having the "get imagestreams/layers" permission on a given image
+// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both
+// spec and status for that tag to be removed. Image stream history is retained until an
+// administrator runs the prune operation, which removes references that are no longer in
+// use. To preserve a historical image, ensure there is a tag in spec pointing to that image
+// by its digest.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStream struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec describes the desired state of this stream
+ // +optional
+ Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Status describes the current state of this stream
+ // +optional
+ Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageStreamSpec represents options for ImageStreams.
+type ImageStreamSpec struct {
+ // lookupPolicy controls how other resources reference images within this namespace.
+ LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"`
+ // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server
+ // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
+ // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.
+ DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+ // tags map arbitrary string values to specific image locators
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// ImageLookupPolicy describes how an image stream can be used to override the image references
+// used by pods, builds, and other resources in a namespace.
+type ImageLookupPolicy struct {
+ // local will change the docker short image references (like "mysql" or
+ // "php:latest") on objects in this namespace to the image ID whenever they match
+ // this image stream, instead of reaching out to a remote registry. The name will
+ // be fully qualified to an image ID if found. The tag's referencePolicy is taken
+ // into account on the replaced value. Only works within the current namespace.
+ Local bool `json:"local" protobuf:"varint,3,opt,name=local"`
+}
+
+// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
+type TagReference struct {
+ // Name of the tag
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
+ // +optional
+ Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
+ // Optional; if specified, a reference to another image that this tag should point to. Valid values
+ // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
+ // can only reference a tag within this same ImageStream.
+ From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"`
+ // Reference states if the tag will be imported. Default value is false, which means the tag will
+ // be imported.
+ Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"`
+ // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+ // is changed the generation is set to match the current stream generation (which is incremented every
+ // time spec is changed). Other processes in the system like the image importer observe that the
+ // generation of spec tag is newer than the generation recorded in the status and use that as a trigger
+ // to import the newest remote tag. To trigger a new import, clients may set this value to zero which
+ // will reset the generation to the latest stream generation. Legacy clients will send this value as
+ // nil which will be merged with the current tag generation.
+ // +optional
+ Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"`
+ // ImportPolicy is information that controls how images may be imported by the server.
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"`
+ // ReferencePolicy defines how other components should consume the image.
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"`
+}
+
+// TagImportPolicy controls how images related to this tag will be imported.
+type TagImportPolicy struct {
+ // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+ Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"`
+ // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
+ Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"`
+ // ImportMode describes how to import an image manifest.
+ ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"`
+}
+
+// ImportModeType describes how to import an image manifest.
+type ImportModeType string
+
+const (
+ // ImportModeLegacy indicates that the legacy behaviour should be used.
+ // For manifest lists, the legacy behaviour will discard the manifest list and import a single
+ // sub-manifest. In this case, the platform is chosen in the following order of priority:
+ // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list.
+ // This mode is the default.
+ ImportModeLegacy ImportModeType = "Legacy"
+ // ImportModePreserveOriginal indicates that the original manifest will be preserved.
+ // For manifest lists, the manifest list and all its sub-manifests will be imported.
+ ImportModePreserveOriginal ImportModeType = "PreserveOriginal"
+)
+
+// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when
+// image change triggers are fired.
+type TagReferencePolicyType string
+
+const (
+ // SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag
+ // is resolved into other resources (builds and deployment configurations).
+ SourceTagReferencePolicy TagReferencePolicyType = "Source"
+ // LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry,
+ // falling back to the remote location if the integrated registry has not been configured. The reference will
+ // use the internal DNS name or registry service IP.
+ LocalTagReferencePolicy TagReferencePolicyType = "Local"
+)
+
+// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
+// image change triggers in deployment configs or builds are resolved. This allows the image stream
+// author to control how images are accessed.
+type TagReferencePolicy struct {
+ // Type determines how the image pull spec should be transformed when the image stream tag is used in
+ // deployment config triggers or new builds. The default value is `Source`, indicating the original
+ // location of the image should be used (if imported). The user may also specify `Local`, indicating
+ // that the pull spec should point to the integrated container image registry and leverage the registry's
+ // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
+ // image to be managed from the image stream's namespace, so others on the platform can access a remote
+ // image but have no access to the remote secret. It also allows the image layers to be mirrored into
+	// the local registry, from which the images can still be pulled even if the upstream registry is unavailable.
+ Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"`
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+type ImageStreamStatus struct {
+ // DockerImageRepository represents the effective location this stream may be accessed at.
+ // May be empty until the server determines where the repository is located
+ DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+ // PublicDockerImageRepository represents the public location from where the image can
+ // be pulled outside the cluster. This field may be empty if the administrator
+ // has not exposed the integrated registry externally.
+ PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
+ // Tags are a historical record of images associated with each tag. The first entry in the
+ // TagEvent array is the currently tagged image.
+ // +patchMergeKey=tag
+ // +patchStrategy=merge
+ Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// NamedTagEventList relates a tag to its image history.
+type NamedTagEventList struct {
+ // Tag is the tag for which the history is recorded
+ Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
+ // Standard object's metadata.
+ Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
+ // Conditions is an array of conditions that apply to the tag event list.
+ Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+type TagEvent struct {
+ // Created holds the time the TagEvent was created
+ Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
+ // DockerImageReference is the string that can be used to pull this image
+ DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
+ // Image is the image
+ Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
+ // Generation is the spec tag generation that resulted in this tag being updated
+ Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
+}
+
+type TagEventConditionType string
+
+// These are valid conditions of TagEvents.
+const (
+ // ImportSuccess with status False means the import of the specific tag failed
+ ImportSuccess TagEventConditionType = "ImportSuccess"
+)
+
+// TagEventCondition contains condition information for a tag event.
+type TagEventCondition struct {
+ // Type of tag event condition, currently only ImportSuccess
+ Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// LastTransitionTime is the time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Reason is a brief machine readable explanation for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // Message is a human readable description of the details about last transition, complementing reason.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+ // Generation is the spec tag generation that this status corresponds to
+ Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"`
+}
+
+// +genclient
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamMapping represents a mapping from a single image stream tag to a container
+// image as well as the reference to the container image stream the image came from. This
+// resource is used by privileged integrators to create an image resource and to associate
+// it with an image stream in the status tags field. Creating an ImageStreamMapping will
+// allow any user who can view the image stream to tag or pull that image, so only create
+// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create and the metadata name and
+// namespace should be set to the image stream containing the tag that should be updated.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamMapping struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Image is a container image.
+ Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+ // Tag is a string value this image can be located with inside the stream.
+ Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
+// Use this resource to interact with the tags and images in an image stream by tag, or
+// to see the image details for a particular tag. The image associated with this resource
+// is the most recently successfully tagged, imported, or pushed image (as described in the
+// image stream status.tags.items list for this tag). If an import is in progress or has
+// failed the previous image will be shown. Deleting an image stream tag clears both the
+// status and spec fields of an image stream. If no image can be retrieved for a given tag,
+// a not found error will be returned.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamTag struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // tag is the spec tag associated with this image stream tag, and it may be null
+ // if only pushes have occurred to this image stream.
+ Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"`
+
+ // generation is the current generation of the tagged image - if tag is provided
+ // and this value is not equal to the tag generation, a user has requested an
+ // import that has not completed, or conditions will be filled out indicating any
+ // error.
+ Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"`
+
+ // lookupPolicy indicates whether this tag will handle image references in this
+ // namespace.
+ LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"`
+
+ // conditions is an array of conditions that apply to the image stream tag.
+ Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"`
+
+ // image associated with the ImageStream and tag.
+ Image Image `json:"image" protobuf:"bytes,5,opt,name=image"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamTagList is a list of ImageStreamTag objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamTagList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of image stream tags
+ Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTag represents a single tag within an image stream and includes the spec,
+// the status history, and the currently referenced image (if any) of the provided
+// tag. This type replaces the ImageStreamTag by providing a full view of the tag.
+// ImageTags are returned for every spec or status tag present on the image stream.
+// If no tag exists in either form a not found error will be returned by the API.
+// A create operation will succeed if no spec tag has already been defined and the
+// spec field is set. Delete will remove both spec and status elements from the
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTag struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is the spec tag associated with this image stream tag, and it may be null
+ // if only pushes have occurred to this image stream.
+ Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // status is the status tag details associated with this image stream tag, and it
+ // may be null if no push or import has been performed.
+ Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"`
+ // image is the details of the most recent image stream status tag, and it may be
+ // null if import has not completed or an administrator has deleted the image
+ // object. To verify this is the most recent image, you must verify the generation
+ // of the most recent status.items entry matches the spec tag (if a spec tag is
+ // set). This field will not be set when listing image tags.
+ Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of image stream tags
+ Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.
+// User interfaces and regular users can use this resource to access the metadata details of
+// a tagged image in the image stream history for viewing, since Image resources are not
+// directly accessible to end users. A not found error will be returned if no such image is
+// referenced by a tag within the ImageStream. Images are created when spec tags are set on
+// an image stream that represent an image in an external registry, when pushing to the
+// integrated registry, or when tagging an existing image from one image stream to another.
+// The name of an image stream image is in the form "<name>@<id>", where the digest is
+// the content addressible identifier for the image (sha256:xxxxx...). You can use
+// ImageStreamImages as the from.kind of an image stream spec tag to reference an image
+// exactly. The only operations supported on the imagestreamimage endpoint are retrieving
+// the image.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImage struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Image associated with the ImageStream and image name.
+ Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+}
+
+// DockerImageReference points to a container image.
+type DockerImageReference struct {
+ // Registry is the registry that contains the container image
+ Registry string `protobuf:"bytes,1,opt,name=registry"`
+ // Namespace is the namespace that contains the container image
+ Namespace string `protobuf:"bytes,2,opt,name=namespace"`
+ // Name is the name of the container image
+ Name string `protobuf:"bytes,3,opt,name=name"`
+ // Tag is which tag of the container image is being referenced
+ Tag string `protobuf:"bytes,4,opt,name=tag"`
+ // ID is the identifier for the container image
+ ID string `protobuf:"bytes,5,opt,name=iD"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamLayers describes information about the layers referenced by images in this
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamLayers struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // blobs is a map of blob name to metadata about the blob.
+ Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"`
+ // images is a map between an image name and the names of the blobs and config that
+ // comprise the image.
+ Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"`
+}
+
+// ImageBlobReferences describes the blob references within an image.
+type ImageBlobReferences struct {
+ // imageMissing is true if the image is referenced by the image stream but the image
+ // object has been deleted from the API by an administrator. When this field is set,
+ // layers and config fields may be empty and callers that depend on the image metadata
+ // should consider the image to be unavailable for download or viewing.
+ // +optional
+ ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"`
+ // layers is the list of blobs that compose this image, from base layer to top layer.
+ // All layers referenced by this array will be defined in the blobs map. Some images
+ // may have zero layers.
+ // +optional
+ Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"`
+ // config, if set, is the blob that contains the image config. Some images do
+ // not have separate config blobs and this field will be set to nil if so.
+ // +optional
+ Config *string `json:"config" protobuf:"bytes,2,opt,name=config"`
+ // manifests is the list of other image names that this image points
+ // to. For a single architecture image, it is empty. For a multi-arch
+ // image, it consists of the digests of single architecture images,
+ // such images shouldn't have layers nor config.
+ // +optional
+ Manifests []string `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// ImageLayerData contains metadata about an image layer.
+type ImageLayerData struct {
+ // Size of the layer in bytes as defined by the underlying store. This field is
+ // optional if the necessary information about size is not available.
+ LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"`
+ // MediaType of the referenced object.
+ MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// The image stream import resource provides an easy way for a user to find and import container images
+// from other container image registries into the server. Individual images or an entire image repository may
+// be imported, and users may choose to see the results of the import prior to tagging the resulting
+// images into the specified image stream.
+//
+// This API is intended for end-user tools that need to see the metadata of the image prior to import
+// (for instance, to generate an application from it). Clients that know the desired image can continue
+// to create spec.tags directly into their image streams.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImport struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec is a description of the images that the user wishes to import
+ Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Status is the result of importing the image
+ Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageStreamImportSpec defines what images should be imported.
+type ImageStreamImportSpec struct {
+ // Import indicates whether to perform an import - if so, the specified tags are set on the spec
+ // and status of the image stream defined by the type meta.
+ Import bool `json:"import" protobuf:"varint,1,opt,name=import"`
+ // Repository is an optional import of an entire container image repository. A maximum limit on the
+ // number of tags imported this way is imposed by the server.
+ Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
+ // Images are a list of individual images to import.
+ Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
+}
+
+// ImageStreamImportStatus contains information about the status of an image stream import.
+type ImageStreamImportStatus struct {
+ // Import is the image stream that was successfully updated or created when 'to' was set.
+ Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"`
+ // Repository is set if spec.repository was set to the outcome of the import
+ Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
+ // Images is set with the result of importing spec.images
+ Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
+}
+
+// RepositoryImportSpec describes a request to import images from a container image repository.
+type RepositoryImportSpec struct {
+ // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+ // ImportPolicy is the policy controlling how the image is imported
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"`
+ // ReferencePolicy defines how other components should consume the image
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"`
+ // IncludeManifest determines if the manifest for each image is returned in the response
+ IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"`
+}
+
+// RepositoryImportStatus describes the result of an image repository import
+type RepositoryImportStatus struct {
+ // Status reflects whether any failure occurred during import
+ Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"`
+ // Images is a list of images successfully retrieved by the import of the repository.
+ Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"`
+ // AdditionalTags are tags that exist in the repository but were not imported because
+ // a maximum limit of automatic imports was applied.
+ AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"`
+}
+
+// ImageImportSpec describes a request to import a specific image.
+type ImageImportSpec struct {
+ // From is the source of an image to import; only kind DockerImage is allowed
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+ // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used
+ To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"`
+
+ // ImportPolicy is the policy controlling how the image is imported
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"`
+ // ReferencePolicy defines how other components should consume the image
+ ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"`
+ // IncludeManifest determines if the manifest for each image is returned in the response
+ IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"`
+}
+
+// ImageImportStatus describes the result of an image import.
+type ImageImportStatus struct {
+ // Status is the status of the image import, including errors encountered while retrieving the image
+ Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"`
+ // Image is the metadata of that image, if the image was located
+ Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+ // Tag is the tag this image was located under, if any
+ Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"`
+ // Manifests holds sub-manifests metadata when importing a manifest list
+ Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretList is a list of Secret.
+// +openshift:compatibility-gen:level=1
+type SecretList corev1.SecretList
diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..953f70263c
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1045 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference.
+func (in *DockerImageReference) DeepCopy() *DockerImageReference {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerImageReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata)
+ if in.DockerImageLayers != nil {
+ in, out := &in.DockerImageLayers, &out.DockerImageLayers
+ *out = make([]ImageLayer, len(*in))
+ copy(*out, *in)
+ }
+ if in.Signatures != nil {
+ in, out := &in.Signatures, &out.Signatures
+ *out = make([]ImageSignature, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DockerImageSignatures != nil {
+ in, out := &in.DockerImageSignatures, &out.DockerImageSignatures
+ *out = make([][]byte, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ }
+ }
+ if in.DockerImageManifests != nil {
+ in, out := &in.DockerImageManifests, &out.DockerImageManifests
+ *out = make([]ImageManifest, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+ if in == nil {
+ return nil
+ }
+ out := new(Image)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Image) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) {
+ *out = *in
+ if in.Layers != nil {
+ in, out := &in.Layers, &out.Layers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(string)
+ **out = **in
+ }
+ if in.Manifests != nil {
+ in, out := &in.Manifests, &out.Manifests
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences.
+func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageBlobReferences)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) {
+ *out = *in
+ out.From = in.From
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ out.ImportPolicy = in.ImportPolicy
+ out.ReferencePolicy = in.ReferencePolicy
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec.
+func (in *ImageImportSpec) DeepCopy() *ImageImportSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageImportSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(Image)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Manifests != nil {
+ in, out := &in.Manifests, &out.Manifests
+ *out = make([]Image, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus.
+func (in *ImageImportStatus) DeepCopy() *ImageImportStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageImportStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLayer) DeepCopyInto(out *ImageLayer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer.
+func (in *ImageLayer) DeepCopy() *ImageLayer {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLayer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) {
+ *out = *in
+ if in.LayerSize != nil {
+ in, out := &in.LayerSize, &out.LayerSize
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData.
+func (in *ImageLayerData) DeepCopy() *ImageLayerData {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLayerData)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageList) DeepCopyInto(out *ImageList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Image, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
+func (in *ImageList) DeepCopy() *ImageList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy.
+func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageLookupPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageManifest) DeepCopyInto(out *ImageManifest) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManifest.
+func (in *ImageManifest) DeepCopy() *ImageManifest {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageManifest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageSignature) DeepCopyInto(out *ImageSignature) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Content != nil {
+ in, out := &in.Content, &out.Content
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]SignatureCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SignedClaims != nil {
+ in, out := &in.SignedClaims, &out.SignedClaims
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Created != nil {
+ in, out := &in.Created, &out.Created
+ *out = (*in).DeepCopy()
+ }
+ if in.IssuedBy != nil {
+ in, out := &in.IssuedBy, &out.IssuedBy
+ *out = new(SignatureIssuer)
+ **out = **in
+ }
+ if in.IssuedTo != nil {
+ in, out := &in.IssuedTo, &out.IssuedTo
+ *out = new(SignatureSubject)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature.
+func (in *ImageSignature) DeepCopy() *ImageSignature {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageSignature)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageSignature) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStream) DeepCopyInto(out *ImageStream) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream.
+func (in *ImageStream) DeepCopy() *ImageStream {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStream)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStream) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Image.DeepCopyInto(&out.Image)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage.
+func (in *ImageStreamImage) DeepCopy() *ImageStreamImage {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamImage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamImage) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport.
+func (in *ImageStreamImport) DeepCopy() *ImageStreamImport {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamImport)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamImport) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) {
+ *out = *in
+ if in.Repository != nil {
+ in, out := &in.Repository, &out.Repository
+ *out = new(RepositoryImportSpec)
+ **out = **in
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]ImageImportSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec.
+func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamImportSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) {
+ *out = *in
+ if in.Import != nil {
+ in, out := &in.Import, &out.Import
+ *out = new(ImageStream)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Repository != nil {
+ in, out := &in.Repository, &out.Repository
+ *out = new(RepositoryImportStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]ImageImportStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus.
+func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamImportStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Blobs != nil {
+ in, out := &in.Blobs, &out.Blobs
+ *out = make(map[string]ImageLayerData, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make(map[string]ImageBlobReferences, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers.
+func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamLayers)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamLayers) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageStream, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList.
+func (in *ImageStreamList) DeepCopy() *ImageStreamList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Image.DeepCopyInto(&out.Image)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping.
+func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamMapping) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) {
+ *out = *in
+ out.LookupPolicy = in.LookupPolicy
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]TagReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec.
+func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) {
+ *out = *in
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]NamedTagEventList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus.
+func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Tag != nil {
+ in, out := &in.Tag, &out.Tag
+ *out = new(TagReference)
+ (*in).DeepCopyInto(*out)
+ }
+ out.LookupPolicy = in.LookupPolicy
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]TagEventCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Image.DeepCopyInto(&out.Image)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag.
+func (in *ImageStreamTag) DeepCopy() *ImageStreamTag {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamTag) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageStreamTag, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList.
+func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageStreamTagList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageStreamTagList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTag) DeepCopyInto(out *ImageTag) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(TagReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Status != nil {
+ in, out := &in.Status, &out.Status
+ *out = new(NamedTagEventList)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Image != nil {
+ in, out := &in.Image, &out.Image
+ *out = new(Image)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag.
+func (in *ImageTag) DeepCopy() *ImageTag {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageTag) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageTagList) DeepCopyInto(out *ImageTagList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageTag, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList.
+func (in *ImageTagList) DeepCopy() *ImageTagList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageTagList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageTagList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) {
+ *out = *in
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]TagEvent, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]TagEventCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList.
+func (in *NamedTagEventList) DeepCopy() *NamedTagEventList {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedTagEventList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) {
+ *out = *in
+ out.From = in.From
+ out.ImportPolicy = in.ImportPolicy
+ out.ReferencePolicy = in.ReferencePolicy
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec.
+func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(RepositoryImportSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ if in.Images != nil {
+ in, out := &in.Images, &out.Images
+ *out = make([]ImageImportStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AdditionalTags != nil {
+ in, out := &in.AdditionalTags, &out.AdditionalTags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus.
+func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RepositoryImportStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretList) DeepCopyInto(out *SecretList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]corev1.Secret, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
+func (in *SecretList) DeepCopy() *SecretList {
+ if in == nil {
+ return nil
+ }
+ out := new(SecretList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) {
+ *out = *in
+ in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition.
+func (in *SignatureCondition) DeepCopy() *SignatureCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(SignatureCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity.
+func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity {
+ if in == nil {
+ return nil
+ }
+ out := new(SignatureGenericEntity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) {
+ *out = *in
+ out.SignatureGenericEntity = in.SignatureGenericEntity
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer.
+func (in *SignatureIssuer) DeepCopy() *SignatureIssuer {
+ if in == nil {
+ return nil
+ }
+ out := new(SignatureIssuer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) {
+ *out = *in
+ out.SignatureGenericEntity = in.SignatureGenericEntity
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject.
+func (in *SignatureSubject) DeepCopy() *SignatureSubject {
+ if in == nil {
+ return nil
+ }
+ out := new(SignatureSubject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagEvent) DeepCopyInto(out *TagEvent) {
+ *out = *in
+ in.Created.DeepCopyInto(&out.Created)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent.
+func (in *TagEvent) DeepCopy() *TagEvent {
+ if in == nil {
+ return nil
+ }
+ out := new(TagEvent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition.
+func (in *TagEventCondition) DeepCopy() *TagEventCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(TagEventCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy.
+func (in *TagImportPolicy) DeepCopy() *TagImportPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(TagImportPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagReference) DeepCopyInto(out *TagReference) {
+ *out = *in
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(corev1.ObjectReference)
+ **out = **in
+ }
+ if in.Generation != nil {
+ in, out := &in.Generation, &out.Generation
+ *out = new(int64)
+ **out = **in
+ }
+ out.ImportPolicy = in.ImportPolicy
+ out.ReferencePolicy = in.ReferencePolicy
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference.
+func (in *TagReference) DeepCopy() *TagReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TagReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy.
+func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(TagReferencePolicy)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..ec7fc2b457
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,444 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DockerImageReference = map[string]string{
+ "": "DockerImageReference points to a container image.",
+ "Registry": "Registry is the registry that contains the container image",
+ "Namespace": "Namespace is the namespace that contains the container image",
+ "Name": "Name is the name of the container image",
+ "Tag": "Tag is which tag of the container image is being referenced",
+ "ID": "ID is the identifier for the container image",
+}
+
+func (DockerImageReference) SwaggerDoc() map[string]string {
+ return map_DockerImageReference
+}
+
+var map_Image = map[string]string{
+ "": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.",
+ "dockerImageMetadata": "DockerImageMetadata contains metadata about this image",
+ "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"",
+ "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest",
+ "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.",
+ "signatures": "Signatures holds all signatures of the image.",
+ "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.",
+ "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.",
+ "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.",
+ "dockerImageManifests": "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+ return map_Image
+}
+
+var map_ImageBlobReferences = map[string]string{
+ "": "ImageBlobReferences describes the blob references within an image.",
+ "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.",
+ "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.",
+ "config": "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.",
+ "manifests": "manifests is the list of other image names that this image points to. For a single architecture image, it is empty. For a multi-arch image, it consists of the digests of single architecture images, such images shouldn't have layers nor config.",
+}
+
+func (ImageBlobReferences) SwaggerDoc() map[string]string {
+ return map_ImageBlobReferences
+}
+
+var map_ImageImportSpec = map[string]string{
+ "": "ImageImportSpec describes a request to import a specific image.",
+ "from": "From is the source of an image to import; only kind DockerImage is allowed",
+ "to": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used",
+ "importPolicy": "ImportPolicy is the policy controlling how the image is imported",
+ "referencePolicy": "ReferencePolicy defines how other components should consume the image",
+ "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
+}
+
+func (ImageImportSpec) SwaggerDoc() map[string]string {
+ return map_ImageImportSpec
+}
+
+var map_ImageImportStatus = map[string]string{
+ "": "ImageImportStatus describes the result of an image import.",
+ "status": "Status is the status of the image import, including errors encountered while retrieving the image",
+ "image": "Image is the metadata of that image, if the image was located",
+ "tag": "Tag is the tag this image was located under, if any",
+ "manifests": "Manifests holds sub-manifests metadata when importing a manifest list",
+}
+
+func (ImageImportStatus) SwaggerDoc() map[string]string {
+ return map_ImageImportStatus
+}
+
+var map_ImageLayer = map[string]string{
+ "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.",
+ "name": "Name of the layer as defined by the underlying store.",
+ "size": "Size of the layer in bytes as defined by the underlying store.",
+ "mediaType": "MediaType of the referenced object.",
+}
+
+func (ImageLayer) SwaggerDoc() map[string]string {
+ return map_ImageLayer
+}
+
+var map_ImageLayerData = map[string]string{
+ "": "ImageLayerData contains metadata about an image layer.",
+ "size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
+ "mediaType": "MediaType of the referenced object.",
+}
+
+func (ImageLayerData) SwaggerDoc() map[string]string {
+ return map_ImageLayerData
+}
+
+var map_ImageList = map[string]string{
+ "": "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of images",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+ return map_ImageList
+}
+
+var map_ImageLookupPolicy = map[string]string{
+ "": "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.",
+ "local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.",
+}
+
+func (ImageLookupPolicy) SwaggerDoc() map[string]string {
+ return map_ImageLookupPolicy
+}
+
+var map_ImageManifest = map[string]string{
+ "": "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.",
+ "digest": "Digest is the unique identifier for the manifest. It refers to an Image object.",
+ "mediaType": "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
+ "manifestSize": "ManifestSize represents the size of the raw object contents, in bytes.",
+ "architecture": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
+ "os": "OS specifies the operating system, for example `linux`.",
+ "variant": "Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
+}
+
+func (ImageManifest) SwaggerDoc() map[string]string {
+ return map_ImageManifest
+}
+
+var map_ImageSignature = map[string]string{
+ "": "ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. They serve just an informative purpose.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "type": "Required: Describes a type of stored blob.",
+ "content": "Required: An opaque binary string which is an image's signature.",
+ "conditions": "Conditions represent the latest available observations of a signature's current state.",
+ "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").",
+ "signedClaims": "Contains claims from the signature.",
+ "created": "If specified, it is the time of signature's creation.",
+ "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).",
+ "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).",
+}
+
+func (ImageSignature) SwaggerDoc() map[string]string {
+ return map_ImageSignature
+}
+
+var map_ImageStream = map[string]string{
+ "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec describes the desired state of this stream",
+ "status": "Status describes the current state of this stream",
+}
+
+func (ImageStream) SwaggerDoc() map[string]string {
+ return map_ImageStream
+}
+
+var map_ImageStreamImage = map[string]string{
+ "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"@\", where the digest is the content addressible identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operations supported on the imagestreamimage endpoint are retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "image": "Image associated with the ImageStream and image name.",
+}
+
+func (ImageStreamImage) SwaggerDoc() map[string]string {
+ return map_ImageStreamImage
+}
+
+var map_ImageStreamImport = map[string]string{
+ "": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec is a description of the images that the user wishes to import",
+ "status": "Status is the result of importing the image",
+}
+
+func (ImageStreamImport) SwaggerDoc() map[string]string {
+ return map_ImageStreamImport
+}
+
+var map_ImageStreamImportSpec = map[string]string{
+ "": "ImageStreamImportSpec defines what images should be imported.",
+ "import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
+ "repository": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.",
+ "images": "Images are a list of individual images to import.",
+}
+
+func (ImageStreamImportSpec) SwaggerDoc() map[string]string {
+ return map_ImageStreamImportSpec
+}
+
+var map_ImageStreamImportStatus = map[string]string{
+ "": "ImageStreamImportStatus contains information about the status of an image stream import.",
+ "import": "Import is the image stream that was successfully updated or created when 'to' was set.",
+ "repository": "Repository is set if spec.repository was set to the outcome of the import",
+ "images": "Images is set with the result of importing spec.images",
+}
+
+func (ImageStreamImportStatus) SwaggerDoc() map[string]string {
+ return map_ImageStreamImportStatus
+}
+
+var map_ImageStreamLayers = map[string]string{
+ "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "blobs": "blobs is a map of blob name to metadata about the blob.",
+ "images": "images is a map between an image name and the names of the blobs and config that comprise the image.",
+}
+
+func (ImageStreamLayers) SwaggerDoc() map[string]string {
+ return map_ImageStreamLayers
+}
+
+var map_ImageStreamList = map[string]string{
+ "": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of imageStreams",
+}
+
+func (ImageStreamList) SwaggerDoc() map[string]string {
+ return map_ImageStreamList
+}
+
+var map_ImageStreamMapping = map[string]string{
+ "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "image": "Image is a container image.",
+ "tag": "Tag is a string value this image can be located with inside the stream.",
+}
+
+func (ImageStreamMapping) SwaggerDoc() map[string]string {
+ return map_ImageStreamMapping
+}
+
+var map_ImageStreamSpec = map[string]string{
+ "": "ImageStreamSpec represents options for ImageStreams.",
+ "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.",
+ "dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.",
+ "tags": "tags map arbitrary string values to specific image locators",
+}
+
+func (ImageStreamSpec) SwaggerDoc() map[string]string {
+ return map_ImageStreamSpec
+}
+
+var map_ImageStreamStatus = map[string]string{
+ "": "ImageStreamStatus contains information about the state of this image stream.",
+ "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located",
+ "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.",
+ "tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.",
+}
+
+func (ImageStreamStatus) SwaggerDoc() map[string]string {
+ return map_ImageStreamStatus
+}
+
+var map_ImageStreamTag = map[string]string{
+ "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.",
+ "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.",
+ "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.",
+ "conditions": "conditions is an array of conditions that apply to the image stream tag.",
+ "image": "image associated with the ImageStream and tag.",
+}
+
+func (ImageStreamTag) SwaggerDoc() map[string]string {
+ return map_ImageStreamTag
+}
+
+var map_ImageStreamTagList = map[string]string{
+ "": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of image stream tags",
+}
+
+func (ImageStreamTagList) SwaggerDoc() map[string]string {
+ return map_ImageStreamTagList
+}
+
+var map_ImageTag = map[string]string{
+ "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.",
+ "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.",
+ "image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). This field will not be set when listing image tags.",
+}
+
+func (ImageTag) SwaggerDoc() map[string]string {
+ return map_ImageTag
+}
+
+var map_ImageTagList = map[string]string{
+ "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of image stream tags",
+}
+
+func (ImageTagList) SwaggerDoc() map[string]string {
+ return map_ImageTagList
+}
+
+var map_NamedTagEventList = map[string]string{
+ "": "NamedTagEventList relates a tag to its image history.",
+ "tag": "Tag is the tag for which the history is recorded",
+ "items": "Standard object's metadata.",
+ "conditions": "Conditions is an array of conditions that apply to the tag event list.",
+}
+
+func (NamedTagEventList) SwaggerDoc() map[string]string {
+ return map_NamedTagEventList
+}
+
+var map_RepositoryImportSpec = map[string]string{
+ "": "RepositoryImportSpec describes a request to import images from a container image repository.",
+ "from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed",
+ "importPolicy": "ImportPolicy is the policy controlling how the image is imported",
+ "referencePolicy": "ReferencePolicy defines how other components should consume the image",
+ "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response",
+}
+
+func (RepositoryImportSpec) SwaggerDoc() map[string]string {
+ return map_RepositoryImportSpec
+}
+
+var map_RepositoryImportStatus = map[string]string{
+ "": "RepositoryImportStatus describes the result of an image repository import",
+ "status": "Status reflects whether any failure occurred during import",
+ "images": "Images is a list of images successfully retrieved by the import of the repository.",
+ "additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.",
+}
+
+func (RepositoryImportStatus) SwaggerDoc() map[string]string {
+ return map_RepositoryImportStatus
+}
+
+var map_SignatureCondition = map[string]string{
+ "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.",
+ "type": "Type of signature condition, Complete or Failed.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastProbeTime": "Last time the condition was checked.",
+ "lastTransitionTime": "Last time the condition transit from one status to another.",
+ "reason": "(brief) reason for the condition's last transition.",
+ "message": "Human readable message indicating details about last transition.",
+}
+
+func (SignatureCondition) SwaggerDoc() map[string]string {
+ return map_SignatureCondition
+}
+
+var map_SignatureGenericEntity = map[string]string{
+ "": "SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject of signing certificate or key.",
+ "organization": "Organization name.",
+ "commonName": "Common name (e.g. openshift-signing-service).",
+}
+
+func (SignatureGenericEntity) SwaggerDoc() map[string]string {
+ return map_SignatureGenericEntity
+}
+
+var map_SignatureIssuer = map[string]string{
+ "": "SignatureIssuer holds information about an issuer of signing certificate or key.",
+}
+
+func (SignatureIssuer) SwaggerDoc() map[string]string {
+ return map_SignatureIssuer
+}
+
+var map_SignatureSubject = map[string]string{
+ "": "SignatureSubject holds information about a person or entity who created the signature.",
+ "publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).",
+}
+
+func (SignatureSubject) SwaggerDoc() map[string]string {
+ return map_SignatureSubject
+}
+
+var map_TagEvent = map[string]string{
+ "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.",
+ "created": "Created holds the time the TagEvent was created",
+ "dockerImageReference": "DockerImageReference is the string that can be used to pull this image",
+ "image": "Image is the image",
+ "generation": "Generation is the spec tag generation that resulted in this tag being updated",
+}
+
+func (TagEvent) SwaggerDoc() map[string]string {
+ return map_TagEvent
+}
+
+var map_TagEventCondition = map[string]string{
+ "": "TagEventCondition contains condition information for a tag event.",
+ "type": "Type of tag event condition, currently only ImportSuccess",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "LastTransitionTIme is the time the condition transitioned from one status to another.",
+ "reason": "Reason is a brief machine readable explanation for the condition's last transition.",
+ "message": "Message is a human readable description of the details about last transition, complementing reason.",
+ "generation": "Generation is the spec tag generation that this status corresponds to",
+}
+
+func (TagEventCondition) SwaggerDoc() map[string]string {
+ return map_TagEventCondition
+}
+
+var map_TagImportPolicy = map[string]string{
+ "": "TagImportPolicy controls how images related to this tag will be imported.",
+ "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.",
+ "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported",
+ "importMode": "ImportMode describes how to import an image manifest.",
+}
+
+func (TagImportPolicy) SwaggerDoc() map[string]string {
+ return map_TagImportPolicy
+}
+
+var map_TagReference = map[string]string{
+ "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.",
+ "name": "Name of the tag",
+ "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.",
+ "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.",
+ "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.",
+ "generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.",
+ "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.",
+ "referencePolicy": "ReferencePolicy defines how other components should consume the image.",
+}
+
+func (TagReference) SwaggerDoc() map[string]string {
+ return map_TagReference
+}
+
+var map_TagReferencePolicy = map[string]string{
+ "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.",
+ "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.",
+}
+
+func (TagReferencePolicy) SwaggerDoc() map[string]string {
+ return map_TagReferencePolicy
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/imageregistry/.codegen.yaml b/vendor/github.com/openshift/api/imageregistry/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/imageregistry/install.go b/vendor/github.com/openshift/api/imageregistry/install.go
new file mode 100644
index 0000000000..4536c8f403
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/install.go
@@ -0,0 +1,26 @@
+package imageregistry
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ imageregistryv1 "github.com/openshift/api/imageregistry/v1"
+)
+
+const (
+ GroupName = "imageregistry.operator.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(imageregistryv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/Makefile b/vendor/github.com/openshift/api/imageregistry/v1/Makefile
new file mode 100644
index 0000000000..ecef2e2705
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="imageregistry.operator.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/doc.go b/vendor/github.com/openshift/api/imageregistry/v1/doc.go
new file mode 100644
index 0000000000..32ad6f8141
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/doc.go
@@ -0,0 +1,3 @@
+// +k8s:deepcopy-gen=package
+// +groupName=imageregistry.operator.openshift.io
+package v1
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/register.go b/vendor/github.com/openshift/api/imageregistry/v1/register.go
new file mode 100644
index 0000000000..b5f708c1bf
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/register.go
@@ -0,0 +1,48 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ version = "v1"
+ groupName = "imageregistry.operator.openshift.io"
+)
+
+var (
+ scheme = runtime.NewScheme()
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ GroupVersion = schema.GroupVersion{Group: groupName, Version: version}
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+func init() {
+ AddToScheme(scheme)
+}
+
+// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Config{},
+ &ConfigList{},
+ &ImagePruner{},
+ &ImagePrunerList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go
new file mode 100644
index 0000000000..9b3cc21a4d
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go
@@ -0,0 +1,608 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigList is a slice of Config objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []Config `json:"items"`
+}
+
+const (
+ // StorageManagementStateManaged indicates the operator is managing the underlying storage.
+ StorageManagementStateManaged = "Managed"
+ // StorageManagementStateUnmanaged indicates the operator is not managing the underlying
+ // storage.
+ StorageManagementStateUnmanaged = "Unmanaged"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Config is the configuration object for a registry instance managed by
+// the registry operator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=configs,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/519
+// +openshift:file-pattern=operatorOrdering=00
+type Config struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec ImageRegistrySpec `json:"spec"`
+ // +optional
+ Status ImageRegistryStatus `json:"status,omitempty"`
+}
+
+// ImageRegistrySpec defines the specs for the running registry.
+type ImageRegistrySpec struct {
+ // operatorSpec allows operator specific configuration to be made.
+ operatorv1.OperatorSpec `json:",inline"`
+ // httpSecret is the value needed by the registry to secure uploads, generated by default.
+ // +optional
+ HTTPSecret string `json:"httpSecret,omitempty"`
+ // proxy defines the proxy to be used when calling master api, upstream
+ // registries, etc.
+ // +optional
+ Proxy ImageRegistryConfigProxy `json:"proxy,omitempty"`
+ // storage details for configuring registry storage, e.g. S3 bucket
+ // coordinates.
+ // +optional
+ Storage ImageRegistryConfigStorage `json:"storage,omitempty"`
+ // readOnly indicates whether the registry instance should reject attempts
+ // to push new images or delete existing ones.
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty"`
+ // disableRedirect controls whether to route all data through the Registry,
+ // rather than redirecting to the backend.
+ // +optional
+ DisableRedirect bool `json:"disableRedirect,omitempty"`
+ // requests controls how many parallel requests a given registry instance
+ // will handle before queuing additional requests.
+ // +optional
+ // +structType=atomic
+ Requests ImageRegistryConfigRequests `json:"requests,omitempty"`
+ // defaultRoute indicates whether an external facing route for the registry
+ // should be created using the default generated hostname.
+ // +optional
+ DefaultRoute bool `json:"defaultRoute,omitempty"`
+ // routes defines additional external facing routes which should be
+ // created for the registry.
+ // +optional
+ // +listType=atomic
+ Routes []ImageRegistryConfigRoute `json:"routes,omitempty"`
+ // replicas determines the number of registry instances to run.
+ Replicas int32 `json:"replicas"`
+ // logging is deprecated, use logLevel instead.
+ // +optional
+ Logging int64 `json:"logging,omitempty"`
+ // resources defines the resource requests+limits for the registry pod.
+ // +optional
+ // +structType=atomic
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+ // nodeSelector defines the node selection constraints for the registry
+ // pod.
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // tolerations defines the tolerations for the registry pod.
+ // +optional
+ // +listType=atomic
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // rolloutStrategy defines rollout strategy for the image registry
+ // deployment.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^(RollingUpdate|Recreate)$`
+ RolloutStrategy string `json:"rolloutStrategy,omitempty"`
+ // affinity is a group of node affinity scheduling rules for the image registry pod(s).
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // topologySpreadConstraints specify how to spread matching pods among the given topology.
+ // +optional
+ // +listType=atomic
+ TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+}
+
+// ImageRegistryStatus reports image registry operational status.
+type ImageRegistryStatus struct {
+ operatorv1.OperatorStatus `json:",inline"`
+
+ // storageManaged is deprecated, please refer to Storage.managementState
+ StorageManaged bool `json:"storageManaged"`
+ // storage indicates the current applied storage configuration of the
+ // registry.
+ Storage ImageRegistryConfigStorage `json:"storage"`
+}
+
+// ImageRegistryConfigProxy defines proxy configuration to be used by registry.
+type ImageRegistryConfigProxy struct {
+ // http defines the proxy to be used by the image registry when
+ // accessing HTTP endpoints.
+ // +optional
+ HTTP string `json:"http,omitempty"`
+ // https defines the proxy to be used by the image registry when
+ // accessing HTTPS endpoints.
+ // +optional
+ HTTPS string `json:"https,omitempty"`
+ // noProxy defines a comma-separated list of host names that shouldn't
+ // go through any proxy.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// ImageRegistryConfigStorageS3CloudFront holds the configuration
+// to use Amazon Cloudfront as the storage middleware in a registry.
+// https://docs.docker.com/registry/configuration/#cloudfront
+type ImageRegistryConfigStorageS3CloudFront struct {
+ // baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.
+ BaseURL string `json:"baseURL"`
+ // privateKey points to secret containing the private key, provided by AWS.
+ PrivateKey corev1.SecretKeySelector `json:"privateKey"`
+ // keypairID is key pair ID provided by AWS.
+ KeypairID string `json:"keypairID"`
+ // duration is the duration of the Cloudfront session.
+ // +optional
+ // +kubebuilder:validation:Format=duration
+ Duration metav1.Duration `json:"duration,omitempty"`
+}
+
+// ImageRegistryConfigStorageEmptyDir is a placeholder to be used when
+// the registry is leveraging ephemeral storage.
+type ImageRegistryConfigStorageEmptyDir struct{}
+
+// S3TrustedCASource references a config map with a CA certificate bundle in
+// the "openshift-config" namespace. The key for the bundle in the
+// config map is "ca-bundle.crt".
+type S3TrustedCASource struct {
+ // name is the metadata.name of the referenced config map.
+ // This field must adhere to standard config map naming restrictions.
+ // The name must consist solely of alphanumeric characters, hyphens (-)
+ // and periods (.). It has a maximum length of 253 characters.
+ // If this field is not specified or is empty string, the default trust
+ // bundle will be used.
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
+ // +optional
+ Name string `json:"name"`
+}
+
+// ImageRegistryConfigStorageS3 holds the information to configure
+// the registry to use the AWS S3 service for backend storage
+// https://docs.docker.com/registry/storage-drivers/s3/
+type ImageRegistryConfigStorageS3 struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // region is the AWS region in which your bucket exists.
+ // Optional, will be set based on the installed AWS Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // regionEndpoint is the endpoint for S3 compatible storage services.
+ // It should be a valid URL with scheme, e.g. https://s3.example.com.
+ // Optional, defaults based on the Region that is provided.
+ // +optional
+ RegionEndpoint string `json:"regionEndpoint,omitempty"`
+ // chunkSizeMiB defines the size of the multipart upload chunks of the S3 API.
+ // The S3 API requires multipart upload chunks to be at least 5MiB.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default value is 10 MiB.
+ // The value is an integer number of MiB.
+ // The minimum value is 5 and the maximum value is 5120 (5 GiB).
+ // +kubebuilder:validation:Minimum=5
+ // +kubebuilder:validation:Maximum=5120
+ // +openshift:enable:FeatureGate=ChunkSizeMiB
+ // +optional
+ ChunkSizeMiB int32 `json:"chunkSizeMiB,omitempty"`
+ // encrypt specifies whether the registry stores the image in encrypted
+ // format or not.
+ // Optional, defaults to false.
+ // +optional
+ Encrypt bool `json:"encrypt,omitempty"`
+ // keyID is the KMS key ID to use for encryption.
+ // Optional, Encrypt must be true, or this parameter is ignored.
+ // +optional
+ KeyID string `json:"keyID,omitempty"`
+ // cloudFront configures Amazon Cloudfront as the storage middleware in a
+ // registry.
+ // +optional
+ CloudFront *ImageRegistryConfigStorageS3CloudFront `json:"cloudFront,omitempty"`
+ // virtualHostedStyle enables using S3 virtual hosted style bucket paths with
+ // a custom RegionEndpoint
+ // Optional, defaults to false.
+ // +optional
+ VirtualHostedStyle bool `json:"virtualHostedStyle"`
+ // trustedCA is a reference to a config map containing a CA bundle. The
+ // image registry and its operator use certificates from this bundle to
+ // verify S3 server certificates.
+ //
+ // The namespace for the config map referenced by trustedCA is
+ // "openshift-config". The key for the bundle in the config map is
+ // "ca-bundle.crt".
+ // +optional
+ TrustedCA S3TrustedCASource `json:"trustedCA"`
+}
+
+// ImageRegistryConfigStorageGCS holds GCS configuration.
+type ImageRegistryConfigStorageGCS struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // region is the GCS location in which your bucket exists.
+ // Optional, will be set based on the installed GCS Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // projectID is the Project ID of the GCP project that this bucket should
+ // be associated with.
+ // +optional
+ ProjectID string `json:"projectID,omitempty"`
+ // keyID is the KMS key ID to use for encryption.
+ // Optional, buckets are encrypted by default on GCP.
+ // This allows for the use of a custom encryption key.
+ // +optional
+ KeyID string `json:"keyID,omitempty"`
+}
+
+// ImageRegistryConfigStorageSwift holds the information to configure
+// the registry to use the OpenStack Swift service for backend storage
+// https://docs.docker.com/registry/storage-drivers/swift/
+type ImageRegistryConfigStorageSwift struct {
+ // authURL defines the URL for obtaining an authentication token.
+ // +optional
+ AuthURL string `json:"authURL,omitempty"`
+ // authVersion specifies the OpenStack Auth's version.
+ // +optional
+ AuthVersion string `json:"authVersion,omitempty"`
+ // container defines the name of Swift container where to store the
+ // registry's data.
+ // +optional
+ Container string `json:"container,omitempty"`
+ // domain specifies Openstack's domain name for Identity v3 API.
+ // +optional
+ Domain string `json:"domain,omitempty"`
+ // domainID specifies Openstack's domain id for Identity v3 API.
+ // +optional
+ DomainID string `json:"domainID,omitempty"`
+ // tenant defines Openstack tenant name to be used by registry.
+ // +optional
+ Tenant string `json:"tenant,omitempty"`
+ // tenantID defines the Openstack tenant ID to be used by the registry.
+ // +optional
+ TenantID string `json:"tenantID,omitempty"`
+ // regionName defines Openstack's region in which container exists.
+ // +optional
+ RegionName string `json:"regionName,omitempty"`
+}
+
+// ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to
+// be used by the registry.
+type ImageRegistryConfigStoragePVC struct {
+ // claim defines the Persistent Volume Claim's name to be used.
+ // +optional
+ Claim string `json:"claim,omitempty"`
+}
+
+// ImageRegistryConfigStorageAzure holds the information to configure
+// the registry to use Azure Blob Storage for backend storage.
+type ImageRegistryConfigStorageAzure struct {
+ // accountName defines the account to be used by the registry.
+ // +optional
+ AccountName string `json:"accountName,omitempty"`
+ // container defines Azure's container to be used by registry.
+ // +optional
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:MinLength=3
+ // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$`
+ Container string `json:"container,omitempty"`
+ // cloudName is the name of the Azure cloud environment to be used by the
+ // registry. If empty, the operator will set it based on the infrastructure
+ // object.
+ // +optional
+ CloudName string `json:"cloudName,omitempty"`
+ // networkAccess defines the network access properties for the storage account.
+ // Defaults to type: External.
+ // +kubebuilder:default={"type": "External"}
+ // +optional
+ NetworkAccess *AzureNetworkAccess `json:"networkAccess,omitempty"`
+}
+
+// AzureNetworkAccess defines the network access properties for the storage account.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Internal' ? true : !has(self.internal)",message="internal is forbidden when type is not Internal"
+// +union
+type AzureNetworkAccess struct {
+ // type is the network access level to be used for the storage account.
+ // type: Internal means the storage account will be private, type: External
+ // means the storage account will be publicly accessible.
+ // Internal storage accounts are only exposed within the cluster's vnet.
+ // External storage accounts are publicly exposed on the internet.
+ // When type: Internal is used, a vnetName, subNetName and privateEndpointName
+ // may optionally be specified. If unspecified, the image registry operator
+ // will discover vnet and subnet names, and generate a privateEndpointName.
+ // Defaults to "External".
+ // +kubebuilder:default:="External"
+ // +unionDiscriminator
+ // +optional
+ Type AzureNetworkAccessType `json:"type,omitempty"`
+ // internal defines the vnet and subnet names to configure a private
+ // endpoint and connect it to the storage account in order to make it
+ // private.
+ // when type: Internal and internal is unset, the image registry operator
+ // will discover vnet and subnet names, and generate a private endpoint
+ // name.
+ // +optional
+ Internal *AzureNetworkAccessInternal `json:"internal,omitempty"`
+}
+
+type AzureNetworkAccessInternal struct {
+ // networkResourceGroupName is the resource group name where the cluster's vnet
+ // and subnet are. When omitted, the registry operator will use the cluster
+ // resource group (from the infrastructure status).
+ // If you set a networkResourceGroupName on your install-config.yaml, that
+ // value will be used automatically (for clusters configured with publish:Internal).
+ // Note that both vnet and subnet must be in the same resource group.
+ // It must be between 1 and 90 characters in length and must consist only of
+ // alphanumeric characters, hyphens (-), periods (.) and underscores (_), and
+ // not end with a period.
+ // +kubebuilder:validation:MaxLength=90
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.-](?:[0-9A-Za-z_.-]*[0-9A-Za-z_-])?$`
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
+ // vnetName is the name of the vnet the registry operates in. When omitted,
+ // the registry operator will discover and set this by using the `kubernetes.io_cluster.`
+ // tag in the vnet resource. This tag is set automatically by the installer.
+ // Commonly, this will be the same vnet as the cluster.
+ // Advanced cluster network configurations should ensure the provided vnetName
+ // is the vnet of the nodes where the image registry pods are running from.
+ // It must be between 2 and 64 characters in length and must consist only of
+ // alphanumeric characters, hyphens (-), periods (.) and underscores (_).
+ // It must start with an alphanumeric character and end with an alphanumeric character or an underscore.
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:MinLength=2
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$`
+ // +optional
+ VNetName string `json:"vnetName,omitempty"`
+ // subnetName is the name of the subnet the registry operates in. When omitted,
+ // the registry operator will discover and set this by using the `kubernetes.io_cluster.`
+ // tag in the vnet resource, then using one of listed subnets.
+ // Advanced cluster network configurations that use network security groups
+ // to protect subnets should ensure the provided subnetName has access to
+ // Azure Storage service.
+ // It must be between 1 and 80 characters in length and must consist only of
+ // alphanumeric characters, hyphens (-), periods (.) and underscores (_).
+ // +kubebuilder:validation:MaxLength=80
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z](?:[0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
+ // +optional
+ SubnetName string `json:"subnetName,omitempty"`
+ // privateEndpointName is the name of the private endpoint for the registry.
+ // When provided, the registry will use it as the name of the private endpoint
+ // it will create for the storage account. When omitted, the registry will
+ // generate one.
+ // It must be between 2 and 64 characters in length and must consist only of
+ // alphanumeric characters, hyphens (-), periods (.) and underscores (_).
+ // It must start with an alphanumeric character and end with an alphanumeric character or an underscore.
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:MinLength=2
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z][0-9A-Za-z_.-]*[0-9A-Za-z_]$`
+ // +optional
+ PrivateEndpointName string `json:"privateEndpointName,omitempty"`
+}
+
+// AzureNetworkAccessType is the network access level to be used for the storage account.
+// +kubebuilder:validation:Enum:="Internal";"External"
+type AzureNetworkAccessType string
+
+const (
+ // AzureNetworkAccessTypeInternal means the storage account will be private
+ AzureNetworkAccessTypeInternal AzureNetworkAccessType = "Internal"
+ // AzureNetworkAccessTypeExternal means the storage account will be publicly accessible
+ AzureNetworkAccessTypeExternal AzureNetworkAccessType = "External"
+)
+
+// ImageRegistryConfigStorageIBMCOS holds the information to configure
+// the registry to use IBM Cloud Object Storage for backend storage.
+type ImageRegistryConfigStorageIBMCOS struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // location is the IBM Cloud location in which your bucket exists.
+ // Optional, will be set based on the installed IBM Cloud location.
+ // +optional
+ Location string `json:"location,omitempty"`
+ // resourceGroupName is the name of the IBM Cloud resource group that this
+ // bucket and its service instance is associated with.
+ // Optional, will be set based on the installed IBM Cloud resource group.
+ // +optional
+ ResourceGroupName string `json:"resourceGroupName,omitempty"`
+ // resourceKeyCRN is the CRN of the IBM Cloud resource key that is created
+ // for the service instance. Commonly referred as a service credential and
+ // must contain HMAC type credentials.
+ // Optional, will be computed if not provided.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$`
+ ResourceKeyCRN string `json:"resourceKeyCRN,omitempty"`
+ // serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service
+ // instance that this bucket is associated with.
+ // Optional, will be computed if not provided.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$`
+ ServiceInstanceCRN string `json:"serviceInstanceCRN,omitempty"`
+}
+
+// EndpointAccessibility defines the Alibaba VPC endpoint for storage
+type EndpointAccessibility string
+
+// AlibabaEncryptionMethod defines an enumerable type for the encryption mode
+type AlibabaEncryptionMethod string
+
+const (
+ // InternalEndpoint sets the VPC endpoint to internal
+ InternalEndpoint EndpointAccessibility = "Internal"
+ // PublicEndpoint sets the VPC endpoint to public
+ PublicEndpoint EndpointAccessibility = "Public"
+
+ // AES256 is an AlibabaEncryptionMethod. This means AES256 encryption
+ AES256 AlibabaEncryptionMethod = "AES256"
+ // KMS is an AlibabaEncryptionMethod. This means KMS encryption
+ KMS AlibabaEncryptionMethod = "KMS"
+)
+
+// EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod,
+// different pointers may be used
+type EncryptionAlibaba struct {
+ // Method defines the different encryption modes available
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `AES256`.
+ // +kubebuilder:validation:Enum="KMS";"AES256"
+ // +kubebuilder:default="AES256"
+ // +optional
+ Method AlibabaEncryptionMethod `json:"method"`
+
+ // KMS (key management service) is an encryption type that holds the struct for KMS KeyID
+ // +optional
+ KMS *KMSEncryptionAlibaba `json:"kms,omitempty"`
+}
+
+type KMSEncryptionAlibaba struct {
+ // KeyID holds the KMS encryption key ID
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ KeyID string `json:"keyID"`
+}
+
+// ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration.
+// Configures the registry to use Alibaba Cloud Object Storage Service for backend storage.
+// More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)
+type ImageRegistryConfigStorageAlibabaOSS struct {
+ // Bucket is the bucket name in which you want to store the registry's data.
+ // About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm)
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default will be autogenerated in the form of -image-registry--
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:MinLength=3
+ // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$`
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // Region is the Alibaba Cloud Region in which your bucket exists.
+ // For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html).
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default will be based on the installed Alibaba Cloud Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // EndpointAccessibility specifies whether the registry uses the OSS VPC internal endpoint
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `Internal`.
+ // +kubebuilder:validation:Enum="Internal";"Public";""
+ // +kubebuilder:default="Internal"
+ // +optional
+ EndpointAccessibility EndpointAccessibility `json:"endpointAccessibility,omitempty"`
+ // Encryption specifies whether you would like your data encrypted on the server side.
+ // More details, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)
+ // +optional
+ Encryption *EncryptionAlibaba `json:"encryption,omitempty"`
+}
+
+// ImageRegistryConfigStorage describes how the storage should be configured
+// for the image registry.
+type ImageRegistryConfigStorage struct {
+ // emptyDir represents ephemeral storage on the pod's host node.
+ // WARNING: this storage cannot be used with more than 1 replica and
+ // is not suitable for production use. When the pod is removed from a
+ // node for any reason, the data in the emptyDir is deleted forever.
+ // +optional
+ EmptyDir *ImageRegistryConfigStorageEmptyDir `json:"emptyDir,omitempty"`
+ // s3 represents configuration that uses Amazon Simple Storage Service.
+ // +optional
+ S3 *ImageRegistryConfigStorageS3 `json:"s3,omitempty"`
+ // gcs represents configuration that uses Google Cloud Storage.
+ // +optional
+ GCS *ImageRegistryConfigStorageGCS `json:"gcs,omitempty"`
+ // swift represents configuration that uses OpenStack Object Storage.
+ // +optional
+ Swift *ImageRegistryConfigStorageSwift `json:"swift,omitempty"`
+ // pvc represents configuration that uses a PersistentVolumeClaim.
+ // +optional
+ PVC *ImageRegistryConfigStoragePVC `json:"pvc,omitempty"`
+ // azure represents configuration that uses Azure Blob Storage.
+ // +optional
+ Azure *ImageRegistryConfigStorageAzure `json:"azure,omitempty"`
+ // ibmcos represents configuration that uses IBM Cloud Object Storage.
+ // +optional
+ IBMCOS *ImageRegistryConfigStorageIBMCOS `json:"ibmcos,omitempty"`
+ // Oss represents configuration that uses Alibaba Cloud Object Storage Service.
+ // +optional
+ OSS *ImageRegistryConfigStorageAlibabaOSS `json:"oss,omitempty"`
+ // managementState indicates if the operator manages the underlying
+ // storage unit. If Managed the operator will remove the storage when
+ // this operator gets Removed.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^(Managed|Unmanaged)$`
+ ManagementState string `json:"managementState,omitempty"`
+}
+
+// ImageRegistryConfigRequests defines registry limits on requests read and write.
+type ImageRegistryConfigRequests struct {
+ // read defines limits for image registry's reads.
+ // +optional
+ Read ImageRegistryConfigRequestsLimits `json:"read,omitempty"`
+ // write defines limits for image registry's writes.
+ // +optional
+ Write ImageRegistryConfigRequestsLimits `json:"write,omitempty"`
+}
+
+// ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued
+// and waiting registry's API requests.
+type ImageRegistryConfigRequestsLimits struct {
+ // maxRunning sets the maximum in flight api requests to the registry.
+ // +optional
+ MaxRunning int `json:"maxRunning,omitempty"`
+ // maxInQueue sets the maximum queued api requests to the registry.
+ // +optional
+ MaxInQueue int `json:"maxInQueue,omitempty"`
+ // maxWaitInQueue sets the maximum time a request can wait in the queue
+ // before being rejected.
+ // +optional
+ // +kubebuilder:validation:Format=duration
+ MaxWaitInQueue metav1.Duration `json:"maxWaitInQueue,omitempty"`
+}
+
+// ImageRegistryConfigRoute holds information on external route access to image
+// registry.
+type ImageRegistryConfigRoute struct {
+ // name of the route to be created.
+ Name string `json:"name"`
+ // hostname for the route.
+ // +optional
+ Hostname string `json:"hostname,omitempty"`
+ // secretName points to secret containing the certificates to be used
+ // by the route.
+ // +optional
+ SecretName string `json:"secretName,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go
new file mode 100644
index 0000000000..43aa2b5cf9
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go
@@ -0,0 +1,117 @@
+package v1
+
+import (
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePrunerList is a slice of ImagePruner objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImagePrunerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []ImagePruner `json:"items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePruner is the configuration object for an image registry pruner
+// managed by the registry operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=imagepruners,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/555
+// +openshift:file-pattern=operatorOrdering=01
+type ImagePruner struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec ImagePrunerSpec `json:"spec"`
+ // +optional
+ Status ImagePrunerStatus `json:"status"`
+}
+
+// ImagePrunerSpec defines the specs for the running image pruner.
+type ImagePrunerSpec struct {
+ // schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron.
+ // Defaults to `0 0 * * *`.
+ // +optional
+ Schedule string `json:"schedule"`
+ // suspend specifies whether or not to suspend subsequent executions of this cronjob.
+ // Defaults to false.
+ // +optional
+ Suspend *bool `json:"suspend,omitempty"`
+ // keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved.
+ // Defaults to 3.
+ // +optional
+ KeepTagRevisions *int `json:"keepTagRevisions,omitempty"`
+ // keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning.
+ // DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence.
+ // +optional
+ KeepYoungerThan *time.Duration `json:"keepYoungerThan,omitempty"`
+ // keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning.
+ // Defaults to 60m (60 minutes).
+ // +optional
+ // +kubebuilder:validation:Format=duration
+ KeepYoungerThanDuration *metav1.Duration `json:"keepYoungerThanDuration,omitempty"`
+ // resources defines the resource requests and limits for the image pruner pod.
+ // +optional
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+ // affinity is a group of node affinity scheduling rules for the image pruner pod.
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // nodeSelector defines the node selection constraints for the image pruner pod.
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // tolerations defines the node tolerations for the image pruner pod.
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain.
+ // Defaults to 3 if not set.
+ // +optional
+ SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
+ // failedJobsHistoryLimit specifies how many failed image pruner jobs to retain.
+ // Defaults to 3 if not set.
+ // +optional
+ FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
+ // ignoreInvalidImageReferences indicates whether the pruner can ignore
+ // errors while parsing image references.
+ // +optional
+ IgnoreInvalidImageReferences bool `json:"ignoreInvalidImageReferences,omitempty"`
+ // logLevel sets the level of log output for the pruner job.
+ //
+ // Valid values are: "Normal", "Debug", "Trace", "TraceAll".
+ // Defaults to "Normal".
+ // +optional
+ // +kubebuilder:default=Normal
+ LogLevel operatorv1.LogLevel `json:"logLevel,omitempty"`
+}
+
+// ImagePrunerStatus reports image pruner operational status.
+type ImagePrunerStatus struct {
+ // observedGeneration is the last generation change that has been applied.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // conditions is a list of conditions and their status.
+ // +optional
+ Conditions []operatorv1.OperatorCondition `json:"conditions,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..63f25fc19e
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go
@@ -0,0 +1,679 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ time "time"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureNetworkAccess) DeepCopyInto(out *AzureNetworkAccess) {
+ *out = *in
+ if in.Internal != nil {
+ in, out := &in.Internal, &out.Internal
+ *out = new(AzureNetworkAccessInternal)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkAccess.
+func (in *AzureNetworkAccess) DeepCopy() *AzureNetworkAccess {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureNetworkAccess)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureNetworkAccessInternal) DeepCopyInto(out *AzureNetworkAccessInternal) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureNetworkAccessInternal.
+func (in *AzureNetworkAccessInternal) DeepCopy() *AzureNetworkAccessInternal {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureNetworkAccessInternal)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Config, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EncryptionAlibaba) DeepCopyInto(out *EncryptionAlibaba) {
+ *out = *in
+ if in.KMS != nil {
+ in, out := &in.KMS, &out.KMS
+ *out = new(KMSEncryptionAlibaba)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAlibaba.
+func (in *EncryptionAlibaba) DeepCopy() *EncryptionAlibaba {
+ if in == nil {
+ return nil
+ }
+ out := new(EncryptionAlibaba)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePruner) DeepCopyInto(out *ImagePruner) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePruner.
+func (in *ImagePruner) DeepCopy() *ImagePruner {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePruner)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImagePruner) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePrunerList) DeepCopyInto(out *ImagePrunerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImagePruner, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerList.
+func (in *ImagePrunerList) DeepCopy() *ImagePrunerList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePrunerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImagePrunerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePrunerSpec) DeepCopyInto(out *ImagePrunerSpec) {
+ *out = *in
+ if in.Suspend != nil {
+ in, out := &in.Suspend, &out.Suspend
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KeepTagRevisions != nil {
+ in, out := &in.KeepTagRevisions, &out.KeepTagRevisions
+ *out = new(int)
+ **out = **in
+ }
+ if in.KeepYoungerThan != nil {
+ in, out := &in.KeepYoungerThan, &out.KeepYoungerThan
+ *out = new(time.Duration)
+ **out = **in
+ }
+ if in.KeepYoungerThanDuration != nil {
+ in, out := &in.KeepYoungerThanDuration, &out.KeepYoungerThanDuration
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(corev1.ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(corev1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SuccessfulJobsHistoryLimit != nil {
+ in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FailedJobsHistoryLimit != nil {
+ in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerSpec.
+func (in *ImagePrunerSpec) DeepCopy() *ImagePrunerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePrunerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePrunerStatus) DeepCopyInto(out *ImagePrunerStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]operatorv1.OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerStatus.
+func (in *ImagePrunerStatus) DeepCopy() *ImagePrunerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePrunerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigProxy) DeepCopyInto(out *ImageRegistryConfigProxy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigProxy.
+func (in *ImageRegistryConfigProxy) DeepCopy() *ImageRegistryConfigProxy {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigRequests) DeepCopyInto(out *ImageRegistryConfigRequests) {
+ *out = *in
+ out.Read = in.Read
+ out.Write = in.Write
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequests.
+func (in *ImageRegistryConfigRequests) DeepCopy() *ImageRegistryConfigRequests {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigRequests)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigRequestsLimits) DeepCopyInto(out *ImageRegistryConfigRequestsLimits) {
+ *out = *in
+ out.MaxWaitInQueue = in.MaxWaitInQueue
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequestsLimits.
+func (in *ImageRegistryConfigRequestsLimits) DeepCopy() *ImageRegistryConfigRequestsLimits {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigRequestsLimits)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigRoute) DeepCopyInto(out *ImageRegistryConfigRoute) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRoute.
+func (in *ImageRegistryConfigRoute) DeepCopy() *ImageRegistryConfigRoute {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigRoute)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorage) DeepCopyInto(out *ImageRegistryConfigStorage) {
+ *out = *in
+ if in.EmptyDir != nil {
+ in, out := &in.EmptyDir, &out.EmptyDir
+ *out = new(ImageRegistryConfigStorageEmptyDir)
+ **out = **in
+ }
+ if in.S3 != nil {
+ in, out := &in.S3, &out.S3
+ *out = new(ImageRegistryConfigStorageS3)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCS != nil {
+ in, out := &in.GCS, &out.GCS
+ *out = new(ImageRegistryConfigStorageGCS)
+ **out = **in
+ }
+ if in.Swift != nil {
+ in, out := &in.Swift, &out.Swift
+ *out = new(ImageRegistryConfigStorageSwift)
+ **out = **in
+ }
+ if in.PVC != nil {
+ in, out := &in.PVC, &out.PVC
+ *out = new(ImageRegistryConfigStoragePVC)
+ **out = **in
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(ImageRegistryConfigStorageAzure)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IBMCOS != nil {
+ in, out := &in.IBMCOS, &out.IBMCOS
+ *out = new(ImageRegistryConfigStorageIBMCOS)
+ **out = **in
+ }
+ if in.OSS != nil {
+ in, out := &in.OSS, &out.OSS
+ *out = new(ImageRegistryConfigStorageAlibabaOSS)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorage.
+func (in *ImageRegistryConfigStorage) DeepCopy() *ImageRegistryConfigStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageAlibabaOSS) DeepCopyInto(out *ImageRegistryConfigStorageAlibabaOSS) {
+ *out = *in
+ if in.Encryption != nil {
+ in, out := &in.Encryption, &out.Encryption
+ *out = new(EncryptionAlibaba)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageAlibabaOSS.
+func (in *ImageRegistryConfigStorageAlibabaOSS) DeepCopy() *ImageRegistryConfigStorageAlibabaOSS {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageAlibabaOSS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageAzure) DeepCopyInto(out *ImageRegistryConfigStorageAzure) {
+ *out = *in
+ if in.NetworkAccess != nil {
+ in, out := &in.NetworkAccess, &out.NetworkAccess
+ *out = new(AzureNetworkAccess)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageAzure.
+func (in *ImageRegistryConfigStorageAzure) DeepCopy() *ImageRegistryConfigStorageAzure {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageAzure)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageEmptyDir) DeepCopyInto(out *ImageRegistryConfigStorageEmptyDir) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageEmptyDir.
+func (in *ImageRegistryConfigStorageEmptyDir) DeepCopy() *ImageRegistryConfigStorageEmptyDir {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageEmptyDir)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageGCS) DeepCopyInto(out *ImageRegistryConfigStorageGCS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageGCS.
+func (in *ImageRegistryConfigStorageGCS) DeepCopy() *ImageRegistryConfigStorageGCS {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageGCS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageIBMCOS) DeepCopyInto(out *ImageRegistryConfigStorageIBMCOS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageIBMCOS.
+func (in *ImageRegistryConfigStorageIBMCOS) DeepCopy() *ImageRegistryConfigStorageIBMCOS {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageIBMCOS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStoragePVC) DeepCopyInto(out *ImageRegistryConfigStoragePVC) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStoragePVC.
+func (in *ImageRegistryConfigStoragePVC) DeepCopy() *ImageRegistryConfigStoragePVC {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStoragePVC)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageS3) DeepCopyInto(out *ImageRegistryConfigStorageS3) {
+ *out = *in
+ if in.CloudFront != nil {
+ in, out := &in.CloudFront, &out.CloudFront
+ *out = new(ImageRegistryConfigStorageS3CloudFront)
+ (*in).DeepCopyInto(*out)
+ }
+ out.TrustedCA = in.TrustedCA
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3.
+func (in *ImageRegistryConfigStorageS3) DeepCopy() *ImageRegistryConfigStorageS3 {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageS3)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopyInto(out *ImageRegistryConfigStorageS3CloudFront) {
+ *out = *in
+ in.PrivateKey.DeepCopyInto(&out.PrivateKey)
+ out.Duration = in.Duration
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3CloudFront.
+func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopy() *ImageRegistryConfigStorageS3CloudFront {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageS3CloudFront)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryConfigStorageSwift) DeepCopyInto(out *ImageRegistryConfigStorageSwift) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageSwift.
+func (in *ImageRegistryConfigStorageSwift) DeepCopy() *ImageRegistryConfigStorageSwift {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryConfigStorageSwift)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistrySpec) DeepCopyInto(out *ImageRegistrySpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ out.Proxy = in.Proxy
+ in.Storage.DeepCopyInto(&out.Storage)
+ out.Requests = in.Requests
+ if in.Routes != nil {
+ in, out := &in.Routes, &out.Routes
+ *out = make([]ImageRegistryConfigRoute, len(*in))
+ copy(*out, *in)
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(corev1.ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(corev1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TopologySpreadConstraints != nil {
+ in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+ *out = make([]corev1.TopologySpreadConstraint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistrySpec.
+func (in *ImageRegistrySpec) DeepCopy() *ImageRegistrySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistrySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageRegistryStatus) DeepCopyInto(out *ImageRegistryStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ in.Storage.DeepCopyInto(&out.Storage)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryStatus.
+func (in *ImageRegistryStatus) DeepCopy() *ImageRegistryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageRegistryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSEncryptionAlibaba) DeepCopyInto(out *KMSEncryptionAlibaba) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSEncryptionAlibaba.
+func (in *KMSEncryptionAlibaba) DeepCopy() *KMSEncryptionAlibaba {
+ if in == nil {
+ return nil
+ }
+ out := new(KMSEncryptionAlibaba)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *S3TrustedCASource) DeepCopyInto(out *S3TrustedCASource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3TrustedCASource.
+func (in *S3TrustedCASource) DeepCopy() *S3TrustedCASource {
+ if in == nil {
+ return nil
+ }
+ out := new(S3TrustedCASource)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..a6ccc2262a
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,43 @@
+configs.imageregistry.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/519
+ CRDName: configs.imageregistry.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ChunkSizeMiB
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: ""
+ GroupName: imageregistry.operator.openshift.io
+ HasStatus: true
+ KindName: Config
+ Labels: {}
+ PluralName: configs
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+imagepruners.imageregistry.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/555
+ CRDName: imagepruners.imageregistry.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: ""
+ GroupName: imageregistry.operator.openshift.io
+ HasStatus: true
+ KindName: ImagePruner
+ Labels: {}
+ PluralName: imagepruners
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..ec999f309b
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,334 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AzureNetworkAccess = map[string]string{
+ "": "AzureNetworkAccess defines the network access properties for the storage account.",
+ "type": "type is the network access level to be used for the storage account. type: Internal means the storage account will be private, type: External means the storage account will be publicly accessible. Internal storage accounts are only exposed within the cluster's vnet. External storage accounts are publicly exposed on the internet. When type: Internal is used, a vnetName, subNetName and privateEndpointName may optionally be specified. If unspecificed, the image registry operator will discover vnet and subnet names, and generate a privateEndpointName. Defaults to \"External\".",
+ "internal": "internal defines the vnet and subnet names to configure a private endpoint and connect it to the storage account in order to make it private. when type: Internal and internal is unset, the image registry operator will discover vnet and subnet names, and generate a private endpoint name.",
+}
+
+func (AzureNetworkAccess) SwaggerDoc() map[string]string {
+ return map_AzureNetworkAccess
+}
+
+var map_AzureNetworkAccessInternal = map[string]string{
+ "networkResourceGroupName": "networkResourceGroupName is the resource group name where the cluster's vnet and subnet are. When omitted, the registry operator will use the cluster resource group (from in the infrastructure status). If you set a networkResourceGroupName on your install-config.yaml, that value will be used automatically (for clusters configured with publish:Internal). Note that both vnet and subnet must be in the same resource group. It must be between 1 and 90 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_), and not end with a period.",
+ "vnetName": "vnetName is the name of the vnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource. This tag is set automatically by the installer. Commonly, this will be the same vnet as the cluster. Advanced cluster network configurations should ensure the provided vnetName is the vnet of the nodes where the image registry pods are running from. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore.",
+ "subnetName": "subnetName is the name of the subnet the registry operates in. When omitted, the registry operator will discover and set this by using the `kubernetes.io_cluster.` tag in the vnet resource, then using one of listed subnets. Advanced cluster network configurations that use network security groups to protect subnets should ensure the provided subnetName has access to Azure Storage service. It must be between 1 and 80 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_).",
+ "privateEndpointName": "privateEndpointName is the name of the private endpoint for the registry. When provided, the registry will use it as the name of the private endpoint it will create for the storage account. When omitted, the registry will generate one. It must be between 2 and 64 characters in length and must consist only of alphanumeric characters, hyphens (-), periods (.) and underscores (_). It must start with an alphanumeric character and end with an alphanumeric character or an underscore.",
+}
+
+func (AzureNetworkAccessInternal) SwaggerDoc() map[string]string {
+ return map_AzureNetworkAccessInternal
+}
+
+var map_Config = map[string]string{
+ "": "Config is the configuration object for a registry instance managed by the registry operator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+ return map_Config
+}
+
+var map_ConfigList = map[string]string{
+ "": "ConfigList is a slice of Config objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+ return map_ConfigList
+}
+
+var map_EncryptionAlibaba = map[string]string{
+ "": "EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, different pointers may be used",
+ "method": "Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`.",
+ "kms": "KMS (key management service) is an encryption type that holds the struct for KMS KeyID",
+}
+
+func (EncryptionAlibaba) SwaggerDoc() map[string]string {
+ return map_EncryptionAlibaba
+}
+
+var map_ImageRegistryConfigProxy = map[string]string{
+ "": "ImageRegistryConfigProxy defines proxy configuration to be used by registry.",
+ "http": "http defines the proxy to be used by the image registry when accessing HTTP endpoints.",
+ "https": "https defines the proxy to be used by the image registry when accessing HTTPS endpoints.",
+ "noProxy": "noProxy defines a comma-separated list of host names that shouldn't go through any proxy.",
+}
+
+func (ImageRegistryConfigProxy) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigProxy
+}
+
+var map_ImageRegistryConfigRequests = map[string]string{
+ "": "ImageRegistryConfigRequests defines registry limits on requests read and write.",
+ "read": "read defines limits for image registry's reads.",
+ "write": "write defines limits for image registry's writes.",
+}
+
+func (ImageRegistryConfigRequests) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigRequests
+}
+
+var map_ImageRegistryConfigRequestsLimits = map[string]string{
+ "": "ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued and waiting registry's API requests.",
+ "maxRunning": "maxRunning sets the maximum in flight api requests to the registry.",
+ "maxInQueue": "maxInQueue sets the maximum queued api requests to the registry.",
+ "maxWaitInQueue": "maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected.",
+}
+
+func (ImageRegistryConfigRequestsLimits) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigRequestsLimits
+}
+
+var map_ImageRegistryConfigRoute = map[string]string{
+ "": "ImageRegistryConfigRoute holds information on external route access to image registry.",
+ "name": "name of the route to be created.",
+ "hostname": "hostname for the route.",
+ "secretName": "secretName points to secret containing the certificates to be used by the route.",
+}
+
+func (ImageRegistryConfigRoute) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigRoute
+}
+
+var map_ImageRegistryConfigStorage = map[string]string{
+ "": "ImageRegistryConfigStorage describes how the storage should be configured for the image registry.",
+ "emptyDir": "emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.",
+ "s3": "s3 represents configuration that uses Amazon Simple Storage Service.",
+ "gcs": "gcs represents configuration that uses Google Cloud Storage.",
+ "swift": "swift represents configuration that uses OpenStack Object Storage.",
+ "pvc": "pvc represents configuration that uses a PersistentVolumeClaim.",
+ "azure": "azure represents configuration that uses Azure Blob Storage.",
+ "ibmcos": "ibmcos represents configuration that uses IBM Cloud Object Storage.",
+ "oss": "Oss represents configuration that uses Alibaba Cloud Object Storage Service.",
+ "managementState": "managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed.",
+}
+
+func (ImageRegistryConfigStorage) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorage
+}
+
+var map_ImageRegistryConfigStorageAlibabaOSS = map[string]string{
+ "": "ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration. Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)",
+ "bucket": "Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--",
+ "region": "Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.",
+ "endpointAccessibility": "EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.",
+ "encryption": "Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)",
+}
+
+func (ImageRegistryConfigStorageAlibabaOSS) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageAlibabaOSS
+}
+
+var map_ImageRegistryConfigStorageAzure = map[string]string{
+ "": "ImageRegistryConfigStorageAzure holds the information to configure the registry to use Azure Blob Storage for backend storage.",
+ "accountName": "accountName defines the account to be used by the registry.",
+ "container": "container defines Azure's container to be used by registry.",
+ "cloudName": "cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object.",
+ "networkAccess": "networkAccess defines the network access properties for the storage account. Defaults to type: External.",
+}
+
+func (ImageRegistryConfigStorageAzure) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageAzure
+}
+
+var map_ImageRegistryConfigStorageEmptyDir = map[string]string{
+ "": "ImageRegistryConfigStorageEmptyDir is an place holder to be used when when registry is leveraging ephemeral storage.",
+}
+
+func (ImageRegistryConfigStorageEmptyDir) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageEmptyDir
+}
+
+var map_ImageRegistryConfigStorageGCS = map[string]string{
+ "": "ImageRegistryConfigStorageGCS holds GCS configuration.",
+ "bucket": "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+ "region": "region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region.",
+ "projectID": "projectID is the Project ID of the GCP project that this bucket should be associated with.",
+ "keyID": "keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key.",
+}
+
+func (ImageRegistryConfigStorageGCS) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageGCS
+}
+
+var map_ImageRegistryConfigStorageIBMCOS = map[string]string{
+ "": "ImageRegistryConfigStorageIBMCOS holds the information to configure the registry to use IBM Cloud Object Storage for backend storage.",
+ "bucket": "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+ "location": "location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location.",
+ "resourceGroupName": "resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group.",
+ "resourceKeyCRN": "resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided.",
+ "serviceInstanceCRN": "serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided.",
+}
+
+func (ImageRegistryConfigStorageIBMCOS) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageIBMCOS
+}
+
+var map_ImageRegistryConfigStoragePVC = map[string]string{
+ "": "ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to be used by the registry.",
+ "claim": "claim defines the Persisent Volume Claim's name to be used.",
+}
+
+func (ImageRegistryConfigStoragePVC) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStoragePVC
+}
+
+var map_ImageRegistryConfigStorageS3 = map[string]string{
+ "": "ImageRegistryConfigStorageS3 holds the information to configure the registry to use the AWS S3 service for backend storage https://docs.docker.com/registry/storage-drivers/s3/",
+ "bucket": "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+ "region": "region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region.",
+ "regionEndpoint": "regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided.",
+ "chunkSizeMiB": "chunkSizeMiB defines the size of the multipart upload chunks of the S3 API. The S3 API requires multipart upload chunks to be at least 5MiB. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is 10 MiB. The value is an integer number of MiB. The minimum value is 5 and the maximum value is 5120 (5 GiB).",
+ "encrypt": "encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false.",
+ "keyID": "keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored.",
+ "cloudFront": "cloudFront configures Amazon Cloudfront as the storage middleware in a registry.",
+ "virtualHostedStyle": "virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint Optional, defaults to false.",
+ "trustedCA": "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates.\n\nThe namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\".",
+}
+
+func (ImageRegistryConfigStorageS3) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageS3
+}
+
+var map_ImageRegistryConfigStorageS3CloudFront = map[string]string{
+ "": "ImageRegistryConfigStorageS3CloudFront holds the configuration to use Amazon Cloudfront as the storage middleware in a registry. https://docs.docker.com/registry/configuration/#cloudfront",
+ "baseURL": "baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.",
+ "privateKey": "privateKey points to secret containing the private key, provided by AWS.",
+ "keypairID": "keypairID is key pair ID provided by AWS.",
+ "duration": "duration is the duration of the Cloudfront session.",
+}
+
+func (ImageRegistryConfigStorageS3CloudFront) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageS3CloudFront
+}
+
+var map_ImageRegistryConfigStorageSwift = map[string]string{
+ "": "ImageRegistryConfigStorageSwift holds the information to configure the registry to use the OpenStack Swift service for backend storage https://docs.docker.com/registry/storage-drivers/swift/",
+ "authURL": "authURL defines the URL for obtaining an authentication token.",
+ "authVersion": "authVersion specifies the OpenStack Auth's version.",
+ "container": "container defines the name of Swift container where to store the registry's data.",
+ "domain": "domain specifies Openstack's domain name for Identity v3 API.",
+ "domainID": "domainID specifies Openstack's domain id for Identity v3 API.",
+ "tenant": "tenant defines Openstack tenant name to be used by registry.",
+ "tenantID": "tenant defines Openstack tenant id to be used by registry.",
+ "regionName": "regionName defines Openstack's region in which container exists.",
+}
+
+func (ImageRegistryConfigStorageSwift) SwaggerDoc() map[string]string {
+ return map_ImageRegistryConfigStorageSwift
+}
+
+var map_ImageRegistrySpec = map[string]string{
+ "": "ImageRegistrySpec defines the specs for the running registry.",
+ "httpSecret": "httpSecret is the value needed by the registry to secure uploads, generated by default.",
+ "proxy": "proxy defines the proxy to be used when calling master api, upstream registries, etc.",
+ "storage": "storage details for configuring registry storage, e.g. S3 bucket coordinates.",
+ "readOnly": "readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones.",
+ "disableRedirect": "disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend.",
+ "requests": "requests controls how many parallel requests a given registry instance will handle before queuing additional requests.",
+ "defaultRoute": "defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname.",
+ "routes": "routes defines additional external facing routes which should be created for the registry.",
+ "replicas": "replicas determines the number of registry instances to run.",
+ "logging": "logging is deprecated, use logLevel instead.",
+ "resources": "resources defines the resource requests+limits for the registry pod.",
+ "nodeSelector": "nodeSelector defines the node selection constraints for the registry pod.",
+ "tolerations": "tolerations defines the tolerations for the registry pod.",
+ "rolloutStrategy": "rolloutStrategy defines rollout strategy for the image registry deployment.",
+ "affinity": "affinity is a group of node affinity scheduling rules for the image registry pod(s).",
+ "topologySpreadConstraints": "topologySpreadConstraints specify how to spread matching pods among the given topology.",
+}
+
+func (ImageRegistrySpec) SwaggerDoc() map[string]string {
+ return map_ImageRegistrySpec
+}
+
+var map_ImageRegistryStatus = map[string]string{
+ "": "ImageRegistryStatus reports image registry operational status.",
+ "storageManaged": "storageManaged is deprecated, please refer to Storage.managementState",
+ "storage": "storage indicates the current applied storage configuration of the registry.",
+}
+
+func (ImageRegistryStatus) SwaggerDoc() map[string]string {
+ return map_ImageRegistryStatus
+}
+
+var map_KMSEncryptionAlibaba = map[string]string{
+ "keyID": "KeyID holds the KMS encryption key ID",
+}
+
+func (KMSEncryptionAlibaba) SwaggerDoc() map[string]string {
+ return map_KMSEncryptionAlibaba
+}
+
+var map_S3TrustedCASource = map[string]string{
+ "": "S3TrustedCASource references a config map with a CA certificate bundle in the \"openshift-config\" namespace. The key for the bundle in the config map is \"ca-bundle.crt\".",
+ "name": "name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used.",
+}
+
+func (S3TrustedCASource) SwaggerDoc() map[string]string {
+ return map_S3TrustedCASource
+}
+
+var map_ImagePruner = map[string]string{
+ "": "ImagePruner is the configuration object for an image registry pruner managed by the registry operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImagePruner) SwaggerDoc() map[string]string {
+ return map_ImagePruner
+}
+
+var map_ImagePrunerList = map[string]string{
+ "": "ImagePrunerList is a slice of ImagePruner objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImagePrunerList) SwaggerDoc() map[string]string {
+ return map_ImagePrunerList
+}
+
+var map_ImagePrunerSpec = map[string]string{
+ "": "ImagePrunerSpec defines the specs for the running image pruner.",
+ "schedule": "schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`.",
+ "suspend": "suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false.",
+ "keepTagRevisions": "keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 3.",
+ "keepYoungerThan": "keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence.",
+ "keepYoungerThanDuration": "keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 60m (60 minutes).",
+ "resources": "resources defines the resource requests and limits for the image pruner pod.",
+ "affinity": "affinity is a group of node affinity scheduling rules for the image pruner pod.",
+ "nodeSelector": "nodeSelector defines the node selection constraints for the image pruner pod.",
+ "tolerations": "tolerations defines the node tolerations for the image pruner pod.",
+ "successfulJobsHistoryLimit": "successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set.",
+ "failedJobsHistoryLimit": "failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. Defaults to 3 if not set.",
+ "ignoreInvalidImageReferences": "ignoreInvalidImageReferences indicates whether the pruner can ignore errors while parsing image references.",
+ "logLevel": "logLevel sets the level of log output for the pruner job.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
+}
+
+func (ImagePrunerSpec) SwaggerDoc() map[string]string {
+ return map_ImagePrunerSpec
+}
+
+var map_ImagePrunerStatus = map[string]string{
+ "": "ImagePrunerStatus reports image pruner operational status.",
+ "observedGeneration": "observedGeneration is the last generation change that has been applied.",
+ "conditions": "conditions is a list of conditions and their status.",
+}
+
+func (ImagePrunerStatus) SwaggerDoc() map[string]string {
+ return map_ImagePrunerStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go
new file mode 100644
index 0000000000..cc91150009
--- /dev/null
+++ b/vendor/github.com/openshift/api/install.go
@@ -0,0 +1,167 @@
+package api
+
+import (
+ kadmissionv1 "k8s.io/api/admission/v1"
+ kadmissionv1beta1 "k8s.io/api/admission/v1beta1"
+ kadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ kadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ kappsv1 "k8s.io/api/apps/v1"
+ kappsv1beta1 "k8s.io/api/apps/v1beta1"
+ kappsv1beta2 "k8s.io/api/apps/v1beta2"
+ kauthenticationv1 "k8s.io/api/authentication/v1"
+ kauthenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+ kauthorizationv1 "k8s.io/api/authorization/v1"
+ kauthorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+ kautoscalingv1 "k8s.io/api/autoscaling/v1"
+ kautoscalingv2 "k8s.io/api/autoscaling/v2"
+ kautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+ kautoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ kbatchv1 "k8s.io/api/batch/v1"
+ kbatchv1beta1 "k8s.io/api/batch/v1beta1"
+ kcertificatesv1 "k8s.io/api/certificates/v1"
+ kcertificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+ kcoordinationv1 "k8s.io/api/coordination/v1"
+ kcoordinationv1beta1 "k8s.io/api/coordination/v1beta1"
+ kcorev1 "k8s.io/api/core/v1"
+ keventsv1 "k8s.io/api/events/v1"
+ keventsv1beta1 "k8s.io/api/events/v1beta1"
+ kextensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ kflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
+ kflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
+ kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
+ knetworkingv1 "k8s.io/api/networking/v1"
+ knetworkingv1beta1 "k8s.io/api/networking/v1beta1"
+ knodev1 "k8s.io/api/node/v1"
+ knodev1alpha1 "k8s.io/api/node/v1alpha1"
+ knodev1beta1 "k8s.io/api/node/v1beta1"
+ kpolicyv1 "k8s.io/api/policy/v1"
+ kpolicyv1beta1 "k8s.io/api/policy/v1beta1"
+ krbacv1 "k8s.io/api/rbac/v1"
+ krbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+ krbacv1beta1 "k8s.io/api/rbac/v1beta1"
+ kschedulingv1 "k8s.io/api/scheduling/v1"
+ kschedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ kschedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
+ kstoragev1 "k8s.io/api/storage/v1"
+ kstoragev1alpha1 "k8s.io/api/storage/v1alpha1"
+ kstoragev1beta1 "k8s.io/api/storage/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/openshift/api/apiserver"
+ "github.com/openshift/api/apps"
+ "github.com/openshift/api/authorization"
+ "github.com/openshift/api/build"
+ "github.com/openshift/api/cloudnetwork"
+ "github.com/openshift/api/config"
+ "github.com/openshift/api/console"
+ "github.com/openshift/api/helm"
+ "github.com/openshift/api/image"
+ "github.com/openshift/api/imageregistry"
+ "github.com/openshift/api/kubecontrolplane"
+ "github.com/openshift/api/machine"
+ "github.com/openshift/api/monitoring"
+ "github.com/openshift/api/network"
+ "github.com/openshift/api/networkoperator"
+ "github.com/openshift/api/oauth"
+ "github.com/openshift/api/openshiftcontrolplane"
+ "github.com/openshift/api/operator"
+ "github.com/openshift/api/operatorcontrolplane"
+ "github.com/openshift/api/osin"
+ "github.com/openshift/api/project"
+ "github.com/openshift/api/quota"
+ "github.com/openshift/api/route"
+ "github.com/openshift/api/samples"
+ "github.com/openshift/api/security"
+ "github.com/openshift/api/servicecertsigner"
+ "github.com/openshift/api/sharedresource"
+ "github.com/openshift/api/template"
+ "github.com/openshift/api/user"
+
+ // just make sure this compiles. Don't add it to a scheme
+ _ "github.com/openshift/api/legacyconfig/v1"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(
+ apiserver.Install,
+ apps.Install,
+ authorization.Install,
+ build.Install,
+ config.Install,
+ console.Install,
+ helm.Install,
+ image.Install,
+ imageregistry.Install,
+ kubecontrolplane.Install,
+ cloudnetwork.Install,
+ network.Install,
+ networkoperator.Install,
+ oauth.Install,
+ openshiftcontrolplane.Install,
+ operator.Install,
+ operatorcontrolplane.Install,
+ osin.Install,
+ project.Install,
+ quota.Install,
+ route.Install,
+ samples.Install,
+ security.Install,
+ servicecertsigner.Install,
+ sharedresource.Install,
+ template.Install,
+ user.Install,
+ machine.Install,
+ monitoring.Install,
+ )
+ // Install is a function which adds every version of every openshift group to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ kubeSchemeBuilder = runtime.NewSchemeBuilder(
+ kadmissionv1.AddToScheme,
+ kadmissionv1beta1.AddToScheme,
+ kadmissionregistrationv1.AddToScheme,
+ kadmissionregistrationv1beta1.AddToScheme,
+ kappsv1.AddToScheme,
+ kappsv1beta1.AddToScheme,
+ kappsv1beta2.AddToScheme,
+ kauthenticationv1.AddToScheme,
+ kauthenticationv1beta1.AddToScheme,
+ kauthorizationv1.AddToScheme,
+ kauthorizationv1beta1.AddToScheme,
+ kautoscalingv1.AddToScheme,
+ kautoscalingv2.AddToScheme,
+ kautoscalingv2beta1.AddToScheme,
+ kautoscalingv2beta2.AddToScheme,
+ kbatchv1.AddToScheme,
+ kbatchv1beta1.AddToScheme,
+ kcertificatesv1.AddToScheme,
+ kcertificatesv1beta1.AddToScheme,
+ kcorev1.AddToScheme,
+ kcoordinationv1.AddToScheme,
+ kcoordinationv1beta1.AddToScheme,
+ keventsv1.AddToScheme,
+ keventsv1beta1.AddToScheme,
+ kextensionsv1beta1.AddToScheme,
+ kflowcontrolv1beta1.AddToScheme,
+ kflowcontrolv1beta2.AddToScheme,
+ kimagepolicyv1alpha1.AddToScheme,
+ knetworkingv1.AddToScheme,
+ knetworkingv1beta1.AddToScheme,
+ knodev1.AddToScheme,
+ knodev1alpha1.AddToScheme,
+ knodev1beta1.AddToScheme,
+ kpolicyv1.AddToScheme,
+ kpolicyv1beta1.AddToScheme,
+ krbacv1.AddToScheme,
+ krbacv1beta1.AddToScheme,
+ krbacv1alpha1.AddToScheme,
+ kschedulingv1.AddToScheme,
+ kschedulingv1alpha1.AddToScheme,
+ kschedulingv1beta1.AddToScheme,
+ kstoragev1.AddToScheme,
+ kstoragev1beta1.AddToScheme,
+ kstoragev1alpha1.AddToScheme,
+ )
+ // InstallKube is a way to install all the external k8s.io/api types
+ InstallKube = kubeSchemeBuilder.AddToScheme
+)
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/install.go b/vendor/github.com/openshift/api/kubecontrolplane/install.go
new file mode 100644
index 0000000000..c34b777235
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/install.go
@@ -0,0 +1,26 @@
+package kubecontrolplane
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1"
+)
+
+const (
+ GroupName = "kubecontrolplane.config.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(kubecontrolplanev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go
new file mode 100644
index 0000000000..d8872a6132
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=kubecontrolplane.config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go
new file mode 100644
index 0000000000..f8abc8ad8c
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go
@@ -0,0 +1,38 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ osinv1 "github.com/openshift/api/osin/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "kubecontrolplane.config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &KubeAPIServerConfig{},
+ &KubeControllerManagerConfig{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go
new file mode 100644
index 0000000000..b9cdcc213b
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go
@@ -0,0 +1,217 @@
+package v1
+
+import (
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+ osinv1 "github.com/openshift/api/osin/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KubeAPIServerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // provides the standard apiserver configuration
+ configv1.GenericAPIServerConfig `json:",inline"`
+
+ // authConfig configures authentication options in addition to the standard
+ // oauth token and client certificate authenticators
+ AuthConfig MasterAuthConfig `json:"authConfig"`
+
+ // aggregatorConfig has options for configuring the aggregator component of the API server.
+ AggregatorConfig AggregatorConfig `json:"aggregatorConfig"`
+
+ // kubeletClientInfo contains information about how to connect to kubelets
+ KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"`
+
+ // servicesSubnet is the subnet to use for assigning service IPs
+ ServicesSubnet string `json:"servicesSubnet"`
+ // servicesNodePortRange is the range to use for assigning service public ports on a host.
+ ServicesNodePortRange string `json:"servicesNodePortRange"`
+
+ // DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.
+ ConsolePublicURL string `json:"consolePublicURL"`
+
+ // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+ // TODO I think we should just drop this feature.
+ UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"`
+
+ // imagePolicyConfig feeds the image policy admission plugin
+ // TODO make it an admission plugin config
+ ImagePolicyConfig KubeAPIServerImagePolicyConfig `json:"imagePolicyConfig"`
+
+ // projectConfig feeds an admission plugin
+ // TODO make it an admission plugin config
+ ProjectConfig KubeAPIServerProjectConfig `json:"projectConfig"`
+
+ // serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key.
+ // (If any file contains a private key, the public portion of the key is used)
+ // The list of public keys is used to verify presented service account tokens.
+ // Each key is tried in order until the list is exhausted or verification succeeds.
+ // If no keys are specified, no service account authentication will be available.
+ ServiceAccountPublicKeyFiles []string `json:"serviceAccountPublicKeyFiles"`
+
+ // oauthConfig, if present start the /oauth endpoint in this process
+ OAuthConfig *osinv1.OAuthConfig `json:"oauthConfig"`
+
+ // TODO this needs to be removed.
+ APIServerArguments map[string]Arguments `json:"apiServerArguments"`
+}
+
+// Arguments masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Arguments []string
+
+func (t Arguments) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+type KubeAPIServerImagePolicyConfig struct {
+ // internalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format.
+ InternalRegistryHostname string `json:"internalRegistryHostname"`
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames"`
+}
+
+type KubeAPIServerProjectConfig struct {
+ // defaultNodeSelector holds default project node label selector
+ DefaultNodeSelector string `json:"defaultNodeSelector"`
+}
+
+// KubeletConnectionInfo holds information necessary for connecting to a kubelet
+type KubeletConnectionInfo struct {
+ // port is the port to connect to kubelets on
+ Port uint32 `json:"port"`
+ // ca is the CA for verifying TLS connections to kubelets
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to kubelets
+ // this is anonymous so that we can inline it for serialization
+ configv1.CertInfo `json:",inline"`
+}
+
+// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+type UserAgentMatchingConfig struct {
+ // requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed
+ RequiredClients []UserAgentMatchRule `json:"requiredClients"`
+
+ // deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes
+ DeniedClients []UserAgentDenyRule `json:"deniedClients"`
+
+	// defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.
+ DefaultRejectionMessage string `json:"defaultRejectionMessage"`
+}
+
+// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb
+type UserAgentMatchRule struct {
+ // regex is a regex that is checked against the User-Agent.
+ // Known variants of oc clients
+ // 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+ Regex string `json:"regex"`
+
+ // httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs".
+ HTTPVerbs []string `json:"httpVerbs"`
+}
+
+// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client
+type UserAgentDenyRule struct {
+ UserAgentMatchRule `json:",inline"`
+
+	// RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.
+ RejectionMessage string `json:"rejectionMessage"`
+}
+
+// MasterAuthConfig configures authentication options in addition to the standard
+// oauth token and client certificate authenticators
+type MasterAuthConfig struct {
+ // requestHeader holds options for setting up a front proxy against the API. It is optional.
+ RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"`
+ // webhookTokenAuthenticators, if present configures remote token reviewers
+ WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"`
+ // oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization
+ // Server Metadata for an external OAuth server.
+ // See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // This option is mutually exclusive with OAuthConfig
+ OAuthMetadataFile string `json:"oauthMetadataFile"`
+}
+
+// WebhookTokenAuthenticator holds the necessary configuration options for
+// external token authenticators
+type WebhookTokenAuthenticator struct {
+ // configFile is a path to a Kubeconfig file with the webhook configuration
+ ConfigFile string `json:"configFile"`
+ // cacheTTL indicates how long an authentication result should be cached.
+ // It takes a valid time duration string (e.g. "5m").
+ // If empty, you get a default timeout of 2 minutes.
+ // If zero (e.g. "0m"), caching is disabled
+ CacheTTL string `json:"cacheTTL"`
+}
+
+// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire
+// API instead of against the /oauth endpoint.
+type RequestHeaderAuthenticationOptions struct {
+ // clientCA is a file with the trusted signer certs. It is required.
+ ClientCA string `json:"clientCA"`
+ // clientCommonNames is a required list of common names to require a match from.
+ ClientCommonNames []string `json:"clientCommonNames"`
+
+ // usernameHeaders is the list of headers to check for user information. First hit wins.
+ UsernameHeaders []string `json:"usernameHeaders"`
+ // groupHeaders is the set of headers to check for group information. All are unioned.
+ GroupHeaders []string `json:"groupHeaders"`
+ // extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.
+ ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"`
+}
+
+// AggregatorConfig holds information required to make the aggregator function.
+type AggregatorConfig struct {
+ // proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers
+ ProxyClientInfo configv1.CertInfo `json:"proxyClientInfo"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KubeControllerManagerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // serviceServingCert provides support for the old alpha service serving cert signer CA bundle
+ ServiceServingCert ServiceServingCert `json:"serviceServingCert"`
+
+ // projectConfig is an optimization for the daemonset controller
+ ProjectConfig KubeControllerManagerProjectConfig `json:"projectConfig"`
+
+ // extendedArguments is used to configure the kube-controller-manager
+ ExtendedArguments map[string]Arguments `json:"extendedArguments"`
+}
+
+type KubeControllerManagerProjectConfig struct {
+ // defaultNodeSelector holds default project node label selector
+ DefaultNodeSelector string `json:"defaultNodeSelector"`
+}
+
+// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+// pods fulfilling a service to serve with.
+type ServiceServingCert struct {
+ // CertFile is a file containing a PEM-encoded certificate
+ CertFile string `json:"certFile"`
+}
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..e4378aa527
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go
@@ -0,0 +1,379 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ osinv1 "github.com/openshift/api/osin/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) {
+ *out = *in
+ out.ProxyClientInfo = in.ProxyClientInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig.
+func (in *AggregatorConfig) DeepCopy() *AggregatorConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AggregatorConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Arguments) DeepCopyInto(out *Arguments) {
+ {
+ in := &in
+ *out = make(Arguments, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments.
+func (in Arguments) DeepCopy() Arguments {
+ if in == nil {
+ return nil
+ }
+ out := new(Arguments)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig)
+ in.AuthConfig.DeepCopyInto(&out.AuthConfig)
+ out.AggregatorConfig = in.AggregatorConfig
+ out.KubeletClientInfo = in.KubeletClientInfo
+ in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig)
+ in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig)
+ out.ProjectConfig = in.ProjectConfig
+ if in.ServiceAccountPublicKeyFiles != nil {
+ in, out := &in.ServiceAccountPublicKeyFiles, &out.ServiceAccountPublicKeyFiles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OAuthConfig != nil {
+ in, out := &in.OAuthConfig, &out.OAuthConfig
+ *out = new(osinv1.OAuthConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.APIServerArguments != nil {
+ in, out := &in.APIServerArguments, &out.APIServerArguments
+ *out = make(map[string]Arguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make(Arguments, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig.
+func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeAPIServerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerImagePolicyConfig) DeepCopyInto(out *KubeAPIServerImagePolicyConfig) {
+ *out = *in
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerImagePolicyConfig.
+func (in *KubeAPIServerImagePolicyConfig) DeepCopy() *KubeAPIServerImagePolicyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerImagePolicyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerProjectConfig) DeepCopyInto(out *KubeAPIServerProjectConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerProjectConfig.
+func (in *KubeAPIServerProjectConfig) DeepCopy() *KubeAPIServerProjectConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerProjectConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ServiceServingCert = in.ServiceServingCert
+ out.ProjectConfig = in.ProjectConfig
+ if in.ExtendedArguments != nil {
+ in, out := &in.ExtendedArguments, &out.ExtendedArguments
+ *out = make(map[string]Arguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make(Arguments, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig.
+func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeControllerManagerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerProjectConfig) DeepCopyInto(out *KubeControllerManagerProjectConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerProjectConfig.
+func (in *KubeControllerManagerProjectConfig) DeepCopy() *KubeControllerManagerProjectConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerProjectConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo.
+func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) {
+ *out = *in
+ if in.RequestHeader != nil {
+ in, out := &in.RequestHeader, &out.RequestHeader
+ *out = new(RequestHeaderAuthenticationOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WebhookTokenAuthenticators != nil {
+ in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
+ *out = make([]WebhookTokenAuthenticator, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig.
+func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterAuthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) {
+ *out = *in
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameHeaders != nil {
+ in, out := &in.UsernameHeaders, &out.UsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupHeaders != nil {
+ in, out := &in.GroupHeaders, &out.GroupHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraHeaderPrefixes != nil {
+ in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions.
+func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderAuthenticationOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert.
+func (in *ServiceServingCert) DeepCopy() *ServiceServingCert {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceServingCert)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) {
+ *out = *in
+ in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule.
+func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentDenyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) {
+ *out = *in
+ if in.HTTPVerbs != nil {
+ in, out := &in.HTTPVerbs, &out.HTTPVerbs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule.
+func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentMatchRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) {
+ *out = *in
+ if in.RequiredClients != nil {
+ in, out := &in.RequiredClients, &out.RequiredClients
+ *out = make([]UserAgentMatchRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DeniedClients != nil {
+ in, out := &in.DeniedClients, &out.DeniedClients
+ *out = make([]UserAgentDenyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig.
+func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentMatchingConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+ if in == nil {
+ return nil
+ }
+ out := new(WebhookTokenAuthenticator)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..906bb271b0
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,161 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AggregatorConfig = map[string]string{
+ "": "AggregatorConfig holds information required to make the aggregator function.",
+ "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers",
+}
+
+func (AggregatorConfig) SwaggerDoc() map[string]string {
+ return map_AggregatorConfig
+}
+
+var map_KubeAPIServerConfig = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "authConfig": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators",
+ "aggregatorConfig": "aggregatorConfig has options for configuring the aggregator component of the API server.",
+ "kubeletClientInfo": "kubeletClientInfo contains information about how to connect to kubelets",
+ "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs",
+ "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.",
+ "consolePublicURL": "DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.",
+ "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
+ "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin",
+ "projectConfig": "projectConfig feeds an admission plugin",
+ "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.",
+ "oauthConfig": "oauthConfig, if present start the /oauth endpoint in this process",
+}
+
+func (KubeAPIServerConfig) SwaggerDoc() map[string]string {
+ return map_KubeAPIServerConfig
+}
+
+var map_KubeAPIServerImagePolicyConfig = map[string]string{
+ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+}
+
+func (KubeAPIServerImagePolicyConfig) SwaggerDoc() map[string]string {
+ return map_KubeAPIServerImagePolicyConfig
+}
+
+var map_KubeAPIServerProjectConfig = map[string]string{
+ "defaultNodeSelector": "defaultNodeSelector holds default project node label selector",
+}
+
+func (KubeAPIServerProjectConfig) SwaggerDoc() map[string]string {
+ return map_KubeAPIServerProjectConfig
+}
+
+var map_KubeControllerManagerConfig = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "serviceServingCert": "serviceServingCert provides support for the old alpha service serving cert signer CA bundle",
+ "projectConfig": "projectConfig is an optimization for the daemonset controller",
+ "extendedArguments": "extendedArguments is used to configure the kube-controller-manager",
+}
+
+func (KubeControllerManagerConfig) SwaggerDoc() map[string]string {
+ return map_KubeControllerManagerConfig
+}
+
+var map_KubeControllerManagerProjectConfig = map[string]string{
+ "defaultNodeSelector": "defaultNodeSelector holds default project node label selector",
+}
+
+func (KubeControllerManagerProjectConfig) SwaggerDoc() map[string]string {
+ return map_KubeControllerManagerProjectConfig
+}
+
+var map_KubeletConnectionInfo = map[string]string{
+ "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet",
+ "port": "port is the port to connect to kubelets on",
+ "ca": "ca is the CA for verifying TLS connections to kubelets",
+}
+
+func (KubeletConnectionInfo) SwaggerDoc() map[string]string {
+ return map_KubeletConnectionInfo
+}
+
+var map_MasterAuthConfig = map[string]string{
+ "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators",
+ "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.",
+ "webhookTokenAuthenticators": "webhookTokenAuthenticators, if present configures remote token reviewers",
+ "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig",
+}
+
+func (MasterAuthConfig) SwaggerDoc() map[string]string {
+ return map_MasterAuthConfig
+}
+
+var map_RequestHeaderAuthenticationOptions = map[string]string{
+ "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.",
+ "clientCA": "clientCA is a file with the trusted signer certs. It is required.",
+ "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.",
+ "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.",
+ "groupHeaders": "groupHeaders is the set of headers to check for group information. All are unioned.",
+ "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.",
+}
+
+func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string {
+ return map_RequestHeaderAuthenticationOptions
+}
+
+var map_ServiceServingCert = map[string]string{
+ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+ "certFile": "CertFile is a file containing a PEM-encoded certificate",
+}
+
+func (ServiceServingCert) SwaggerDoc() map[string]string {
+ return map_ServiceServingCert
+}
+
+var map_UserAgentDenyRule = map[string]string{
+ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client",
+ "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.",
+}
+
+func (UserAgentDenyRule) SwaggerDoc() map[string]string {
+ return map_UserAgentDenyRule
+}
+
+var map_UserAgentMatchRule = map[string]string{
+ "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb",
+ "regex": "regex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f",
+ "httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".",
+}
+
+func (UserAgentMatchRule) SwaggerDoc() map[string]string {
+ return map_UserAgentMatchRule
+}
+
+var map_UserAgentMatchingConfig = map[string]string{
+ "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
+ "requiredClients": "requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed",
+ "deniedClients": "deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes",
+ "defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.",
+}
+
+func (UserAgentMatchingConfig) SwaggerDoc() map[string]string {
+ return map_UserAgentMatchingConfig
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+ "": "WebhookTokenAuthenticators holds the necessary configuation options for external token authenticators",
+ "configFile": "configFile is a path to a Kubeconfig file with the webhook configuration",
+ "cacheTTL": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+ return map_WebhookTokenAuthenticator
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/doc.go b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go
new file mode 100644
index 0000000000..93fc6dc50d
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=legacy.config.openshift.io
+// Package v1 is deprecated and exists to ease a transition to current APIs
+package v1
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/register.go b/vendor/github.com/openshift/api/legacyconfig/v1/register.go
new file mode 100644
index 0000000000..8ba7525210
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/register.go
@@ -0,0 +1,46 @@
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ // Legacy is the 'v1' apiVersion of config
+ LegacyGroupName = ""
+ GroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"}
+ LegacySchemeGroupVersion = GroupVersion
+ legacySchemeBuilder = runtime.NewSchemeBuilder(
+ addKnownTypesToLegacy,
+ )
+ InstallLegacy = legacySchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypesToLegacy(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(LegacySchemeGroupVersion,
+ &MasterConfig{},
+ &NodeConfig{},
+ &SessionSecrets{},
+
+ &BasicAuthPasswordIdentityProvider{},
+ &AllowAllPasswordIdentityProvider{},
+ &DenyAllPasswordIdentityProvider{},
+ &HTPasswdPasswordIdentityProvider{},
+ &LDAPPasswordIdentityProvider{},
+ &KeystonePasswordIdentityProvider{},
+ &RequestHeaderIdentityProvider{},
+ &GitHubIdentityProvider{},
+ &GitLabIdentityProvider{},
+ &GoogleIdentityProvider{},
+ &OpenIDIdentityProvider{},
+
+ &LDAPSyncConfig{},
+
+ &DefaultAdmissionConfig{},
+
+ &BuildDefaultsConfig{},
+ &BuildOverridesConfig{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go
new file mode 100644
index 0000000000..1450742739
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go
@@ -0,0 +1,87 @@
+package v1
+
+import "k8s.io/apimachinery/pkg/runtime"
+
+var _ runtime.NestedObjectDecoder = &MasterConfig{}
+
+// DecodeNestedObjects handles decoding RawExtensions on the MasterConfig, ensuring the
+// objects are decoded with the provided decoder.
+func (c *MasterConfig) DecodeNestedObjects(d runtime.Decoder) error {
+ // decoding failures result in a runtime.Unknown object being created in Object and passed
+ // to conversion
+ for k, v := range c.AdmissionConfig.PluginConfig {
+ DecodeNestedRawExtensionOrUnknown(d, &v.Configuration)
+ c.AdmissionConfig.PluginConfig[k] = v
+ }
+ if c.OAuthConfig != nil {
+ for i := range c.OAuthConfig.IdentityProviders {
+ DecodeNestedRawExtensionOrUnknown(d, &c.OAuthConfig.IdentityProviders[i].Provider)
+ }
+ }
+ DecodeNestedRawExtensionOrUnknown(d, &c.AuditConfig.PolicyConfiguration)
+ return nil
+}
+
+var _ runtime.NestedObjectEncoder = &MasterConfig{}
+
+// EncodeNestedObjects handles encoding RawExtensions on the MasterConfig, ensuring the
+// objects are encoded with the provided encoder.
+func (c *MasterConfig) EncodeNestedObjects(e runtime.Encoder) error {
+ for k, v := range c.AdmissionConfig.PluginConfig {
+ if err := EncodeNestedRawExtension(e, &v.Configuration); err != nil {
+ return err
+ }
+ c.AdmissionConfig.PluginConfig[k] = v
+ }
+ if c.OAuthConfig != nil {
+ for i := range c.OAuthConfig.IdentityProviders {
+ if err := EncodeNestedRawExtension(e, &c.OAuthConfig.IdentityProviders[i].Provider); err != nil {
+ return err
+ }
+ }
+ }
+ if err := EncodeNestedRawExtension(e, &c.AuditConfig.PolicyConfiguration); err != nil {
+ return err
+ }
+ return nil
+}
+
+// DecodeNestedRawExtensionOrUnknown decodes the RawExtension with the provided decoder, storing the result in ext.Object; on failure it falls back to a runtime.Unknown carrying the raw bytes.
+func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) {
+ if ext.Raw == nil || ext.Object != nil {
+ return
+ }
+ obj, gvk, err := d.Decode(ext.Raw, nil, nil)
+ if err != nil {
+ unk := &runtime.Unknown{Raw: ext.Raw}
+ if runtime.IsNotRegisteredError(err) {
+ if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil {
+ unk.APIVersion = gvk.GroupVersion().String()
+ unk.Kind = gvk.Kind
+ ext.Object = unk
+ return
+ }
+ }
+ // TODO: record mime-type with the object
+ if gvk != nil {
+ unk.APIVersion = gvk.GroupVersion().String()
+ unk.Kind = gvk.Kind
+ }
+ obj = unk
+ }
+ ext.Object = obj
+}
+
+// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or
+// return an error.
+func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error {
+ if ext.Raw != nil || ext.Object == nil {
+ return nil
+ }
+ data, err := runtime.Encode(e, ext.Object)
+ if err != nil {
+ return err
+ }
+ ext.Raw = data
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go
new file mode 100644
index 0000000000..6a5718c1db
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go
@@ -0,0 +1,31 @@
+package v1
+
+import "encoding/json"
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// If the value is a string, it sets the Value field of the StringSource.
+// Otherwise, it is unmarshaled into the StringSourceSpec struct
+func (s *StringSource) UnmarshalJSON(value []byte) error {
+ // If we can unmarshal to a simple string, just set the value
+ var simpleValue string
+ if err := json.Unmarshal(value, &simpleValue); err == nil {
+ s.Value = simpleValue
+ return nil
+ }
+
+ // Otherwise do the full struct unmarshal
+ return json.Unmarshal(value, &s.StringSourceSpec)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
+// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
+func (s *StringSource) MarshalJSON() ([]byte, error) {
+ // If we have only a cleartext value set, do a simple string marshal
+ if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
+ return json.Marshal(s.Value)
+ }
+
+ // Otherwise do the full struct marshal of the externalized bits
+ return json.Marshal(s.StringSourceSpec)
+}
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go
new file mode 100644
index 0000000000..eaf40b6ee4
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go
@@ -0,0 +1,1596 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ buildv1 "github.com/openshift/api/build/v1"
+)
+
+type ExtendedArguments map[string][]string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeConfig is the fully specified config starting an OpenShift node
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type NodeConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname.
+ // If you're describing a set of static nodes to the master, this value must match one of the values in the list
+ NodeName string `json:"nodeName"`
+
+ // Node may have multiple IPs, specify the IP to use for pod traffic routing
+ // If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used
+ NodeIP string `json:"nodeIP"`
+
+ // ServingInfo describes how to start serving
+ ServingInfo ServingInfo `json:"servingInfo"`
+
+ // MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master
+ MasterKubeConfig string `json:"masterKubeConfig"`
+
+ // MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.
+ MasterClientConnectionOverrides *ClientConnectionOverrides `json:"masterClientConnectionOverrides"`
+
+ // DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to
+ // 'cluster.local'.
+ DNSDomain string `json:"dnsDomain"`
+
+ // DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes
+ // master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured
+ // to resolve names from any other port). When running more complex local DNS configurations, this is often set
+ // to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see
+ // dnsBindAddress) or the master DNS.
+ DNSIP string `json:"dnsIP"`
+
+ // DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started.
+ // Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need
+ // a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured
+ // on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other
+ // queries to the host environments nameservers.
+ DNSBindAddress string `json:"dnsBindAddress"`
+
+ // DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running
+ // a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to
+ // the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the
+ // system, this value should be set to the upstream nameservers dnsmasq resolves with.
+ DNSNameservers []string `json:"dnsNameservers"`
+
+ // DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server.
+ // Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra
+ // nameservers to DNSNameservers if set.
+ DNSRecursiveResolvConf string `json:"dnsRecursiveResolvConf"`
+
+ // Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead
+ DeprecatedNetworkPluginName string `json:"networkPluginName,omitempty"`
+
+ // NetworkConfig provides network options for the node
+ NetworkConfig NodeNetworkConfig `json:"networkConfig"`
+
+ // VolumeDirectory is the directory that volumes will be stored under
+ VolumeDirectory string `json:"volumeDirectory"`
+
+ // ImageConfig holds options that describe how to build image names for system components
+ ImageConfig ImageConfig `json:"imageConfig"`
+
+ // AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.
+ AllowDisabledDocker bool `json:"allowDisabledDocker"`
+
+ // PodManifestConfig holds the configuration for enabling the Kubelet to
+ // create pods based from a manifest file(s) placed locally on the node
+ PodManifestConfig *PodManifestConfig `json:"podManifestConfig"`
+
+ // AuthConfig holds authn/authz configuration options
+ AuthConfig NodeAuthConfig `json:"authConfig"`
+
+ // DockerConfig holds Docker related configuration options.
+ DockerConfig DockerConfig `json:"dockerConfig"`
+
+ // KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's
+ // command line arguments. These are not migrated or validated, so if you use them they may become invalid.
+ // These values override other settings in NodeConfig which may cause invalid configurations.
+ KubeletArguments ExtendedArguments `json:"kubeletArguments,omitempty"`
+
+ // ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's
+ // command line arguments. These are not migrated or validated, so if you use them they may become invalid.
+ // These values override other settings in NodeConfig which may cause invalid configurations.
+ ProxyArguments ExtendedArguments `json:"proxyArguments,omitempty"`
+
+ // IPTablesSyncPeriod is how often iptable rules are refreshed
+ IPTablesSyncPeriod string `json:"iptablesSyncPeriod"`
+
+ // EnableUnidling controls whether or not the hybrid unidling proxy will be set up
+ EnableUnidling *bool `json:"enableUnidling"`
+
+ // VolumeConfig contains options for configuring volumes on the node.
+ VolumeConfig NodeVolumeConfig `json:"volumeConfig"`
+}
+
+// NodeVolumeConfig contains options for configuring volumes on the node.
+type NodeVolumeConfig struct {
+ // LocalQuota contains options for controlling local volume quota on the node.
+ LocalQuota LocalQuota `json:"localQuota"`
+}
+
+// MasterVolumeConfig contains options for configuring volume plugins in the master node.
+type MasterVolumeConfig struct {
+ // DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true
+ DynamicProvisioningEnabled *bool `json:"dynamicProvisioningEnabled"`
+}
+
+// LocalQuota contains options for controlling local volume quota on the node.
+type LocalQuota struct {
+ // FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID.
+ // At present this is only implemented for emptyDir volumes, and if the underlying
+ // volumeDirectory is on an XFS filesystem.
+ PerFSGroup *resource.Quantity `json:"perFSGroup"`
+}
+
+// NodeAuthConfig holds authn/authz configuration options
+type NodeAuthConfig struct {
+ // AuthenticationCacheTTL indicates how long an authentication result should be cached.
+ // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled
+ AuthenticationCacheTTL string `json:"authenticationCacheTTL"`
+
+ // AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.
+ AuthenticationCacheSize int `json:"authenticationCacheSize"`
+
+ // AuthorizationCacheTTL indicates how long an authorization result should be cached.
+ // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled
+ AuthorizationCacheTTL string `json:"authorizationCacheTTL"`
+
+ // AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.
+ AuthorizationCacheSize int `json:"authorizationCacheSize"`
+}
+
+// NodeNetworkConfig provides network options for the node
+type NodeNetworkConfig struct {
+ // NetworkPluginName is a string specifying the networking plugin
+ NetworkPluginName string `json:"networkPluginName"`
+ // Maximum transmission unit for the network packets
+ MTU uint32 `json:"mtu"`
+}
+
+// DockerConfig holds Docker related configuration options.
+type DockerConfig struct {
+ // ExecHandlerName is the name of the handler to use for executing
+ // commands in containers.
+ ExecHandlerName DockerExecHandlerType `json:"execHandlerName"`
+ // DockerShimSocket is the location of the dockershim socket the kubelet uses.
+ // Currently unix socket is supported on Linux, and tcp is supported on windows.
+ // Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'
+ DockerShimSocket string `json:"dockerShimSocket"`
+ // DockershimRootDirectory is the dockershim root directory.
+ DockershimRootDirectory string `json:"dockerShimRootDirectory"`
+}
+
+type DockerExecHandlerType string
+
+const (
+ // DockerExecHandlerNative uses Docker's exec API for executing commands in containers.
+ DockerExecHandlerNative DockerExecHandlerType = "native"
+ // DockerExecHandlerNsenter uses nsenter for executing commands in containers.
+ DockerExecHandlerNsenter DockerExecHandlerType = "nsenter"
+
+ // ControllersDisabled indicates no controllers should be enabled.
+ ControllersDisabled = "none"
+ // ControllersAll indicates all controllers should be started.
+ ControllersAll = "*"
+)
+
+// FeatureList contains a set of features
+type FeatureList []string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MasterConfig holds the necessary configuration options for the OpenShift master
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type MasterConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ServingInfo describes how to start serving
+ ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+ // AuthConfig configures authentication options in addition to the standard
+ // oauth token and client certificate authenticators
+ AuthConfig MasterAuthConfig `json:"authConfig"`
+
+ // AggregatorConfig has options for configuring the aggregator component of the API server.
+ AggregatorConfig AggregatorConfig `json:"aggregatorConfig"`
+
+ // CORSAllowedOrigins
+ CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
+
+ // APILevels is a list of API levels that should be enabled on startup: v1 as examples
+ APILevels []string `json:"apiLevels"`
+
+ // MasterPublicURL is how clients can access the OpenShift API server
+ MasterPublicURL string `json:"masterPublicURL"`
+
+ // Controllers is a list of the controllers that should be started. If set to "none", no controllers
+ // will start automatically. The default value is "*" which will start all controllers. When
+ // using "*", you may exclude controllers by prepending a "-" in front of their name. No other
+ // values are recognized at this time.
+ Controllers string `json:"controllers"`
+
+ // AdmissionConfig contains admission control plugin configuration.
+ AdmissionConfig AdmissionConfig `json:"admissionConfig"`
+
+ // ControllerConfig holds configuration values for controllers
+ ControllerConfig ControllerConfig `json:"controllerConfig"`
+
+ // EtcdStorageConfig contains information about how API resources are
+ // stored in Etcd. These values are only relevant when etcd is the
+ // backing store for the cluster.
+ EtcdStorageConfig EtcdStorageConfig `json:"etcdStorageConfig"`
+
+ // EtcdClientInfo contains information about how to connect to etcd
+ EtcdClientInfo EtcdConnectionInfo `json:"etcdClientInfo"`
+ // KubeletClientInfo contains information about how to connect to kubelets
+ KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"`
+
+ // KubernetesMasterConfig, if present start the kubernetes master in this process
+ KubernetesMasterConfig KubernetesMasterConfig `json:"kubernetesMasterConfig"`
+ // EtcdConfig, if present start etcd in this process
+ EtcdConfig *EtcdConfig `json:"etcdConfig"`
+ // OAuthConfig, if present start the /oauth endpoint in this process
+ OAuthConfig *OAuthConfig `json:"oauthConfig"`
+
+ // DNSConfig, if present start the DNS server in this process
+ DNSConfig *DNSConfig `json:"dnsConfig"`
+
+ // ServiceAccountConfig holds options related to service accounts
+ ServiceAccountConfig ServiceAccountConfig `json:"serviceAccountConfig"`
+
+ // MasterClients holds all the client connection information for controllers and other system components
+ MasterClients MasterClients `json:"masterClients"`
+
+ // ImageConfig holds options that describe how to build image names for system components
+ ImageConfig ImageConfig `json:"imageConfig"`
+
+ // ImagePolicyConfig controls limits and behavior for importing images
+ ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"`
+
+ // PolicyConfig holds information about where to locate critical pieces of bootstrapping policy
+ PolicyConfig PolicyConfig `json:"policyConfig"`
+
+ // ProjectConfig holds information about project creation and defaults
+ ProjectConfig ProjectConfig `json:"projectConfig"`
+
+ // RoutingConfig holds information about routing and route generation
+ RoutingConfig RoutingConfig `json:"routingConfig"`
+
+ // NetworkConfig to be passed to the compiled in network plugin
+ NetworkConfig MasterNetworkConfig `json:"networkConfig"`
+
+ // MasterVolumeConfig contains options for configuring volume plugins in the master node.
+ VolumeConfig MasterVolumeConfig `json:"volumeConfig"`
+
+ // JenkinsPipelineConfig holds information about the default Jenkins template
+ // used for JenkinsPipeline build strategy.
+ JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"`
+
+ // AuditConfig holds information related to auditing capabilities.
+ AuditConfig AuditConfig `json:"auditConfig"`
+
+ // DisableOpenAPI avoids starting the openapi endpoint because it is very expensive.
+ // This option will be removed at a later time. It is never serialized.
+ DisableOpenAPI bool `json:"-"`
+}
+
+// MasterAuthConfig configures authentication options in addition to the standard
+// oauth token and client certificate authenticators
+type MasterAuthConfig struct {
+ // RequestHeader holds options for setting up a front proxy against the API. It is optional.
+ RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"`
+ // WebhookTokenAuthnConfig, if present configures remote token reviewers
+ WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"`
+ // OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization
+ // Server Metadata for an external OAuth server.
+ // See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ // This option is mutually exclusive with OAuthConfig
+ OAuthMetadataFile string `json:"oauthMetadataFile"`
+}
+
+// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire
+// API instead of against the /oauth endpoint.
+type RequestHeaderAuthenticationOptions struct {
+ // ClientCA is a file with the trusted signer certs. It is required.
+ ClientCA string `json:"clientCA"`
+ // ClientCommonNames is a required list of common names to require a match from.
+ ClientCommonNames []string `json:"clientCommonNames"`
+
+ // UsernameHeaders is the list of headers to check for user information. First hit wins.
+ UsernameHeaders []string `json:"usernameHeaders"`
+ // GroupNameHeader is the set of headers to check for group information. All are unioned.
+ GroupHeaders []string `json:"groupHeaders"`
+ // ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.
+ ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"`
+}
+
+// AggregatorConfig holds information required to make the aggregator function.
+type AggregatorConfig struct {
+ // ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers
+ ProxyClientInfo CertInfo `json:"proxyClientInfo"`
+}
+
+type LogFormatType string
+
+type WebHookModeType string
+
+const (
+ // LogFormatLegacy saves event in 1-line text format.
+ LogFormatLegacy LogFormatType = "legacy"
+ // LogFormatJson saves event in structured json format.
+ LogFormatJson LogFormatType = "json"
+
+ // WebHookModeBatch indicates that the webhook should buffer audit events
+ // internally, sending batch updates either once a certain number of
+ // events have been received or a certain amount of time has passed.
+ WebHookModeBatch WebHookModeType = "batch"
+ // WebHookModeBlocking causes the webhook to block on every attempt to process
+ // a set of events. This causes requests to the API server to wait for a
+ // round trip to the external audit service before sending a response.
+ WebHookModeBlocking WebHookModeType = "blocking"
+)
+
+// AuditConfig holds configuration for the audit capabilities
+type AuditConfig struct {
+ // If this flag is set, audit log will be printed in the logs.
+ // The logs contains, method, user and a requested URL.
+ Enabled bool `json:"enabled"`
+ // All requests coming to the apiserver will be logged to this file.
+ AuditFilePath string `json:"auditFilePath"`
+ // Maximum number of days to retain old log files based on the timestamp encoded in their filename.
+ MaximumFileRetentionDays int `json:"maximumFileRetentionDays"`
+ // Maximum number of old log files to retain.
+ MaximumRetainedFiles int `json:"maximumRetainedFiles"`
+ // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
+ MaximumFileSizeMegabytes int `json:"maximumFileSizeMegabytes"`
+
+ // PolicyFile is a path to the file that defines the audit policy configuration.
+ PolicyFile string `json:"policyFile"`
+ // PolicyConfiguration is an embedded policy configuration object to be used
+ // as the audit policy configuration. If present, it will be used instead of
+ // the path to the policy file.
+ PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
+
+ // Format of saved audits (legacy or json).
+ LogFormat LogFormatType `json:"logFormat"`
+
+ // Path to a .kubeconfig formatted file that defines the audit webhook configuration.
+ WebHookKubeConfig string `json:"webHookKubeConfig"`
+ // Strategy for sending audit events (block or batch).
+ WebHookMode WebHookModeType `json:"webHookMode"`
+}
+
+// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy
+type JenkinsPipelineConfig struct {
+ // AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided
+ // template when the first build config in the project with type JenkinsPipeline
+ // is created. When not specified this option defaults to true.
+ AutoProvisionEnabled *bool `json:"autoProvisionEnabled"`
+ // TemplateNamespace contains the namespace name where the Jenkins template is stored
+ TemplateNamespace string `json:"templateNamespace"`
+ // TemplateName is the name of the default Jenkins template
+ TemplateName string `json:"templateName"`
+ // ServiceName is the name of the Jenkins service OpenShift uses to detect
+ // whether a Jenkins pipeline handler has already been installed in a project.
+ // This value *must* match a service name in the provided template.
+ ServiceName string `json:"serviceName"`
+ // Parameters specifies a set of optional parameters to the Jenkins template.
+ Parameters map[string]string `json:"parameters"`
+}
+
+// ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images
+type ImagePolicyConfig struct {
+ // MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user
+ // does a bulk import of a container repository. This number defaults to 50 to prevent users from
+ // importing large numbers of images accidentally. Set -1 for no limit.
+ MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"`
+ // DisableScheduledImport allows scheduled background import of images to be disabled.
+ DisableScheduledImport bool `json:"disableScheduledImport"`
+ // ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams
+ // scheduled for background import are checked against the upstream repository. The default value is 15 minutes.
+ ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"`
+ // MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the
+ // background per minute. The default value is 60. Set to -1 for unlimited.
+ MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"`
+ // AllowedRegistriesForImport limits the container image registries that normal users may import
+ // images from. Set this list to the registries that you trust to contain valid Docker
+ // images and that you want applications to be able to import from. Users with
+ // permission to create Images or ImageStreamMappings via the API are not affected by
+ // this policy - typically only administrators or system integrations will have those
+ // permissions.
+ AllowedRegistriesForImport *AllowedRegistries `json:"allowedRegistriesForImport,omitempty"`
+ // InternalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format.
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
+ // ExternalRegistryHostname sets the hostname for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"`
+ // AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that
+ // should be trusted during imagestream import.
+ AdditionalTrustedCA string `json:"additionalTrustedCA,omitempty"`
+}
+
+// AllowedRegistries represents a list of registries allowed for the image import.
+type AllowedRegistries []RegistryLocation
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // DomainName specifies a domain name for the registry
+ // In case the registry use non-standard (80 or 443) port, the port should be included
+ // in the domain name as well.
+ DomainName string `json:"domainName"`
+ // Insecure indicates whether the registry is secure (https) or insecure (http)
+ // By default (if not specified) the registry is assumed as secure.
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+// holds the necessary configuration options for
+type ProjectConfig struct {
+ // DefaultNodeSelector holds default project node label selector
+ DefaultNodeSelector string `json:"defaultNodeSelector"`
+
+ // ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint
+ ProjectRequestMessage string `json:"projectRequestMessage"`
+
+ // ProjectRequestTemplate is the template to use for creating projects in response to projectrequest.
+ // It is in the format namespace/template and it is optional.
+ // If it is not specified, a default template is used.
+ ProjectRequestTemplate string `json:"projectRequestTemplate"`
+
+ // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.
+ SecurityAllocator *SecurityAllocator `json:"securityAllocator"`
+}
+
+// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.
+type SecurityAllocator struct {
+ // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the
+ // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks
+ // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the
+ // ranges container images will use once user namespaces are started).
+ UIDAllocatorRange string `json:"uidAllocatorRange"`
+	// MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is
+	// "<prefix>/<numLabels>[,<maxLabels>]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels
+ // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated
+ // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default
+ // will allow the server to set them automatically.
+ //
+ // Examples:
+ // * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511
+ // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511
+ //
+ MCSAllocatorRange string `json:"mcsAllocatorRange"`
+ // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS
+ // ranges (100k namespaces, 535k/5 labels).
+ MCSLabelsPerProject int `json:"mcsLabelsPerProject"`
+}
+
+// holds the necessary configuration options for
+type PolicyConfig struct {
+ // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+ UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"`
+}
+
+// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+type UserAgentMatchingConfig struct {
+ // If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed
+ RequiredClients []UserAgentMatchRule `json:"requiredClients"`
+
+ // If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes
+ DeniedClients []UserAgentDenyRule `json:"deniedClients"`
+
+ // DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.
+ DefaultRejectionMessage string `json:"defaultRejectionMessage"`
+}
+
+// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb
+type UserAgentMatchRule struct {
+ // UserAgentRegex is a regex that is checked against the User-Agent.
+ // Known variants of oc clients
+ // 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f
+ // 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+ // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+ Regex string `json:"regex"`
+
+ // HTTPVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs".
+ HTTPVerbs []string `json:"httpVerbs"`
+}
+
+// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client
+type UserAgentDenyRule struct {
+ UserAgentMatchRule `json:",inline"`
+
+ // RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.
+ RejectionMessage string `json:"rejectionMessage"`
+}
+
+// RoutingConfig holds the necessary configuration options for routing to subdomains
+type RoutingConfig struct {
+ // Subdomain is the suffix appended to $service.$namespace. to form the default route hostname
+ // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the
+ // "default" route.
+ Subdomain string `json:"subdomain"`
+}
+
+// MasterNetworkConfig to be passed to the compiled in network plugin
+type MasterNetworkConfig struct {
+ // NetworkPluginName is the name of the network plugin to use
+ NetworkPluginName string `json:"networkPluginName"`
+ // ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.
+ DeprecatedClusterNetworkCIDR string `json:"clusterNetworkCIDR,omitempty"`
+ // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.
+ ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"`
+ // HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.
+ DeprecatedHostSubnetLength uint32 `json:"hostSubnetLength,omitempty"`
+ // ServiceNetwork is the CIDR string to specify the service networks
+ ServiceNetworkCIDR string `json:"serviceNetworkCIDR"`
+ // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP
+ // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that
+ // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You
+ // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.
+ ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"`
+ // IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare
+ // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from.
+ // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips,
+ // nodes, pods, or services.
+ IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"`
+ // VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value
+ VXLANPort uint32 `json:"vxlanPort,omitempty"`
+}
+
+// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
+type ClusterNetworkEntry struct {
+ // CIDR defines the total range of a cluster networks address space.
+ CIDR string `json:"cidr"`
+ // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.
+ HostSubnetLength uint32 `json:"hostSubnetLength"`
+}
+
+// ImageConfig holds the necessary configuration options for building image names for system components
+type ImageConfig struct {
+ // Format is the format of the name to be built for the system component
+ Format string `json:"format"`
+ // Latest determines if the latest tag will be pulled from the registry
+ Latest bool `json:"latest"`
+}
+
+// RemoteConnectionInfo holds information necessary for establishing a remote connection
+type RemoteConnectionInfo struct {
+ // URL is the remote URL to connect to
+ URL string `json:"url"`
+ // CA is the CA for verifying TLS connections
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information to present
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+// KubeletConnectionInfo holds information necessary for connecting to a kubelet
+type KubeletConnectionInfo struct {
+ // Port is the port to connect to kubelets on
+ Port uint `json:"port"`
+ // CA is the CA for verifying TLS connections to kubelets
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to kubelets
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+// EtcdConnectionInfo holds information necessary for connecting to an etcd server
+type EtcdConnectionInfo struct {
+ // URLs are the URLs for etcd
+ URLs []string `json:"urls"`
+ // CA is a file containing trusted roots for the etcd server certificates
+ CA string `json:"ca"`
+ // CertInfo is the TLS client cert information for securing communication to etcd
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+}
+
+// EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes
+type EtcdStorageConfig struct {
+ // KubernetesStorageVersion is the API version that Kube resources in etcd should be
+ // serialized to. This value should *not* be advanced until all clients in the
+ // cluster that read from etcd have code that allows them to read the new version.
+ KubernetesStorageVersion string `json:"kubernetesStorageVersion"`
+ // KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will
+ // be rooted under. This value, if changed, will mean existing objects in etcd will
+ // no longer be located. The default value is 'kubernetes.io'.
+ KubernetesStoragePrefix string `json:"kubernetesStoragePrefix"`
+ // OpenShiftStorageVersion is the API version that OS resources in etcd should be
+ // serialized to. This value should *not* be advanced until all clients in the
+ // cluster that read from etcd have code that allows them to read the new version.
+ OpenShiftStorageVersion string `json:"openShiftStorageVersion"`
+ // OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will
+ // be rooted under. This value, if changed, will mean existing objects in etcd will
+ // no longer be located. The default value is 'openshift.io'.
+ OpenShiftStoragePrefix string `json:"openShiftStoragePrefix"`
+}
+
+// ServingInfo holds information about serving web pages
+type ServingInfo struct {
+ // BindAddress is the ip:port to serve on
+ BindAddress string `json:"bindAddress"`
+ // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
+ // "tcp4", and "tcp6"
+ BindNetwork string `json:"bindNetwork"`
+ // CertInfo is the TLS cert info for serving secure traffic.
+ // this is anonymous so that we can inline it for serialization
+ CertInfo `json:",inline"`
+ // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
+ ClientCA string `json:"clientCA"`
+ // NamedCertificates is a list of certificates to use to secure requests to specific hostnames
+ NamedCertificates []NamedCertificate `json:"namedCertificates"`
+ // MinTLSVersion is the minimum TLS version supported.
+ // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
+ MinTLSVersion string `json:"minTLSVersion,omitempty"`
+ // CipherSuites contains an overridden list of ciphers for the server to support.
+ // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants
+ CipherSuites []string `json:"cipherSuites,omitempty"`
+}
+
+// NamedCertificate specifies a certificate/key, and the names it should be served for
+type NamedCertificate struct {
+ // Names is a list of DNS names this certificate should be used to secure
+ // A name can be a normal DNS name, or can contain leading wildcard segments.
+ Names []string `json:"names"`
+ // CertInfo is the TLS cert info for serving secure traffic
+ CertInfo `json:",inline"`
+}
+
+// HTTPServingInfo holds configuration for serving HTTP
+type HTTPServingInfo struct {
+ // ServingInfo is the HTTP serving information
+ ServingInfo `json:",inline"`
+ // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.
+ MaxRequestsInFlight int `json:"maxRequestsInFlight"`
+ // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if
+ // -1 there is no limit on requests.
+ RequestTimeoutSeconds int `json:"requestTimeoutSeconds"`
+}
+
+// MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes
+type MasterClients struct {
+ // OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master
+ OpenShiftLoopbackKubeConfig string `json:"openshiftLoopbackKubeConfig"`
+
+ // OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.
+ OpenShiftLoopbackClientConnectionOverrides *ClientConnectionOverrides `json:"openshiftLoopbackClientConnectionOverrides"`
+}
+
+// ClientConnectionOverrides are a set of overrides to the default client connection settings.
+type ClientConnectionOverrides struct {
+ // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
+ // default value of 'application/json'. This field will control all connections to the server used by a particular
+ // client.
+ AcceptContentTypes string `json:"acceptContentTypes"`
+ // ContentType is the content type used when sending data to the server from this client.
+ ContentType string `json:"contentType"`
+
+ // QPS controls the number of queries per second allowed for this connection.
+ QPS float32 `json:"qps"`
+ // Burst allows extra queries to accumulate when a client is exceeding its rate.
+ Burst int32 `json:"burst"`
+}
+
+// DNSConfig holds the necessary configuration options for DNS
+type DNSConfig struct {
+ // BindAddress is the ip:port to serve DNS on
+ BindAddress string `json:"bindAddress"`
+ // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp",
+ // "tcp4", and "tcp6"
+ BindNetwork string `json:"bindNetwork"`
+ // AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open
+ // resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible
+ // to public networks.
+ AllowRecursiveQueries bool `json:"allowRecursiveQueries"`
+}
+
+// WebhookTokenAuthenticator holds the necessary configuration options for an
+// external token authenticator
+type WebhookTokenAuthenticator struct {
+ // ConfigFile is a path to a Kubeconfig file with the webhook configuration
+ ConfigFile string `json:"configFile"`
+ // CacheTTL indicates how long an authentication result should be cached.
+ // It takes a valid time duration string (e.g. "5m").
+ // If empty, you get a default timeout of 2 minutes.
+ // If zero (e.g. "0m"), caching is disabled
+ CacheTTL string `json:"cacheTTL"`
+}
+
+// OAuthConfig holds the necessary configuration options for OAuth authentication
+type OAuthConfig struct {
+ // MasterCA is the CA for verifying the TLS connection back to the MasterURL.
+ MasterCA *string `json:"masterCA"`
+
+ // MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens
+ MasterURL string `json:"masterURL"`
+
+ // MasterPublicURL is used for building valid client redirect URLs for internal and external access
+ MasterPublicURL string `json:"masterPublicURL"`
+
+ // AssetPublicURL is used for building valid client redirect URLs for external access
+ AssetPublicURL string `json:"assetPublicURL"`
+
+ // AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.
+ AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"`
+
+ // IdentityProviders is an ordered list of ways for a user to identify themselves
+ IdentityProviders []IdentityProvider `json:"identityProviders"`
+
+ // GrantConfig describes how to handle grants
+ GrantConfig GrantConfig `json:"grantConfig"`
+
+ // SessionConfig hold information about configuring sessions.
+ SessionConfig *SessionConfig `json:"sessionConfig"`
+
+ // TokenConfig contains options for authorization and access tokens
+ TokenConfig TokenConfig `json:"tokenConfig"`
+
+ // Templates allow you to customize pages like the login page.
+ Templates *OAuthTemplates `json:"templates"`
+}
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+ // Login is a path to a file containing a go template used to render the login page.
+ // If unspecified, the default login page is used.
+ Login string `json:"login"`
+
+ // ProviderSelection is a path to a file containing a go template used to render the provider selection page.
+ // If unspecified, the default provider selection page is used.
+ ProviderSelection string `json:"providerSelection"`
+
+ // Error is a path to a file containing a go template used to render error pages during the authentication or grant flow
+ // If unspecified, the default error page is used.
+ Error string `json:"error"`
+}
+
+// ServiceAccountConfig holds the necessary configuration options for a service account
+type ServiceAccountConfig struct {
+ // ManagedNames is a list of service account names that will be auto-created in every namespace.
+ // If no names are specified, the ServiceAccountsController will not be started.
+ ManagedNames []string `json:"managedNames"`
+
+ // LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace
+ // without explicitly referencing them
+ LimitSecretReferences bool `json:"limitSecretReferences"`
+
+ // PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens.
+ // If no private key is specified, the service account TokensController will not be started.
+ PrivateKeyFile string `json:"privateKeyFile"`
+
+ // PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key.
+ // (If any file contains a private key, the public portion of the key is used)
+ // The list of public keys is used to verify presented service account tokens.
+ // Each key is tried in order until the list is exhausted or verification succeeds.
+ // If no keys are specified, no service account authentication will be available.
+ PublicKeyFiles []string `json:"publicKeyFiles"`
+
+ // MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically
+ // inject the contents of this file into pods so they can verify connections to the master.
+ MasterCA string `json:"masterCA"`
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+ // AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens
+ AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"`
+ // AccessTokenMaxAgeSeconds defines the maximum age of access tokens
+ AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"`
+ // AccessTokenInactivityTimeoutSeconds defines the default token
+ // inactivity timeout for tokens granted by any client.
+ // Setting it to nil means the feature is completely disabled (default)
+ // The default setting can be overridden on OAuthClient basis.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out.
+ // Valid values are:
+ // - 0: Tokens never time out
+ // - X: Tokens time out if there is no activity for X seconds
+ // The current minimum allowed value for X is 300 (5 minutes)
+ AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+}
+
+// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession
+type SessionConfig struct {
+ // SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object
+ // If no file is specified, a random signing and encryption key are generated at each server start
+ SessionSecretsFile string `json:"sessionSecretsFile"`
+ // SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession
+ SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"`
+ // SessionName is the cookie name used to store the session
+ SessionName string `json:"sessionName"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type SessionSecrets struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Secrets is a list of secrets
+ // New sessions are signed and encrypted using the first secret.
+ // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.
+ Secrets []SessionSecret `json:"secrets"`
+}
+
+// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions
+type SessionSecret struct {
+ // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.
+ Authentication string `json:"authentication"`
+ // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.
+ Encryption string `json:"encryption"`
+}
+
+// IdentityProvider provides identities for users authenticating using credentials
+type IdentityProvider struct {
+ // Name is used to qualify the identities returned by this provider
+ Name string `json:"name"`
+ // UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider
+ UseAsChallenger bool `json:"challenge"`
+ // UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against
+ UseAsLogin bool `json:"login"`
+ // MappingMethod determines how identities from this provider are mapped to users
+ MappingMethod string `json:"mappingMethod"`
+ // Provider contains the information about how to set up a specific identity provider
+ Provider runtime.RawExtension `json:"provider"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BasicAuthPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // RemoteConnectionInfo contains information about how to connect to the external basic auth server
+ RemoteConnectionInfo `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type AllowAllPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DenyAllPasswordIdentityProvider provides no identities for users
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type DenyAllPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type HTPasswdPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // File is a reference to your htpasswd file
+ File string `json:"file"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type LDAPPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ // URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+ // BindDN is an optional DN to bind with during the search phase.
+ BindDN string `json:"bindDN"`
+ // BindPassword is an optional password to bind with during the search phase.
+ BindPassword StringSource `json:"bindPassword"`
+
+ // Insecure, if true, indicates the connection should not use TLS.
+ // Cannot be set to true with a URL scheme of "ldaps://"
+ // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830
+ Insecure bool `json:"insecure"`
+ // CA is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+ // Attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // ID is the list of attributes whose values should be used as the user ID. Required.
+ // LDAP standard identity attribute is "dn"
+ ID []string `json:"id"`
+ // PreferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ PreferredUsername []string `json:"preferredUsername"`
+ // Name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ Name []string `json:"name"`
+ // Email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ Email []string `json:"email"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KeystonePasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ // RemoteConnectionInfo contains information about how to connect to the keystone server
+ RemoteConnectionInfo `json:",inline"`
+ // DomainName is required for keystone v3
+ DomainName string `json:"domainName"`
+ // UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username
+ UseKeystoneIdentity bool `json:"useKeystoneIdentity"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type RequestHeaderIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // LoginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ LoginURL string `json:"loginURL"`
+
+ // ChallengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ ChallengeURL string `json:"challengeURL"`
+
+ // ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.
+ ClientCA string `json:"clientCA"`
+ // ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.
+ ClientCommonNames []string `json:"clientCommonNames"`
+
+ // Headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+ // PreferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+ // NameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+ // EmailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GitHubIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ClientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // ClientSecret is the oauth client secret
+ ClientSecret StringSource `json:"clientSecret"`
+ // Organizations optionally restricts which organizations are allowed to log in
+ Organizations []string `json:"organizations"`
+ // Teams optionally restricts which teams are allowed to log in. Format is &lt;org&gt;/&lt;team&gt;.
+ Teams []string `json:"teams"`
+ // Hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.
+ Hostname string `json:"hostname"`
+ // CA is the optional trusted certificate authority bundle to use when making requests to the server.
+ // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.
+ CA string `json:"ca"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GitLabIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // CA is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+ // URL is the oauth server base URL
+ URL string `json:"url"`
+ // ClientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // ClientSecret is the oauth client secret
+ ClientSecret StringSource `json:"clientSecret"`
+ // Legacy determines if OAuth2 or OIDC should be used
+ // If true, OAuth2 is used
+ // If false, OIDC is used
+ // If nil and the URL's host is gitlab.com, OIDC is used
+ // Otherwise, OAuth2 is used
+ // In a future release, nil will default to using OIDC
+ // Eventually this flag will be removed and only OIDC will be used
+ Legacy *bool `json:"legacy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GoogleIdentityProvider provides identities for users authenticating using Google credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GoogleIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ClientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // ClientSecret is the oauth client secret
+ ClientSecret StringSource `json:"clientSecret"`
+
+ // HostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
+ HostedDomain string `json:"hostedDomain"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OpenIDIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // CA is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+
+ // ClientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // ClientSecret is the oauth client secret
+ ClientSecret StringSource `json:"clientSecret"`
+
+ // ExtraScopes are any scopes to request in addition to the standard "openid" scope.
+ ExtraScopes []string `json:"extraScopes"`
+
+ // ExtraAuthorizeParameters are any custom parameters to add to the authorize request.
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"`
+
+ // URLs to use to authenticate
+ URLs OpenIDURLs `json:"urls"`
+
+ // Claims mappings
+ Claims OpenIDClaims `json:"claims"`
+}
+
+// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider
+type OpenIDURLs struct {
+ // Authorize is the oauth authorization URL
+ Authorize string `json:"authorize"`
+ // Token is the oauth token granting URL
+ Token string `json:"token"`
+ // UserInfo is the optional userinfo URL.
+ // If present, a granted access_token is used to request claims
+ // If empty, a granted id_token is parsed for claims
+ UserInfo string `json:"userInfo"`
+}
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+ // ID is the list of claims whose values should be used as the user ID. Required.
+ // OpenID standard identity claim is "sub"
+ ID []string `json:"id"`
+ // PreferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the id claim
+ PreferredUsername []string `json:"preferredUsername"`
+ // Name is the list of claims whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ Name []string `json:"name"`
+ // Email is the list of claims whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ Email []string `json:"email"`
+}
+
+// GrantConfig holds the necessary configuration options for grant handlers
+type GrantConfig struct {
+ // Method determines the default strategy to use when an OAuth client requests a grant.
+ // This method will be used only if the specific OAuth client doesn't provide a strategy
+ // of their own. Valid grant handling methods are:
+ // - auto: always approves grant requests, useful for trusted clients
+ // - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+ // - deny: always denies grant requests, useful for black-listed clients
+ Method GrantHandlerType `json:"method"`
+
+ // ServiceAccountMethod is used for determining client authorization for service account oauth client.
+ // It must be either: deny, prompt
+ ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"`
+}
+
+type GrantHandlerType string
+
+const (
+ // GrantHandlerAuto auto-approves client authorization grant requests
+ GrantHandlerAuto GrantHandlerType = "auto"
+ // GrantHandlerPrompt prompts the user to approve new client authorization grant requests
+ GrantHandlerPrompt GrantHandlerType = "prompt"
+ // GrantHandlerDeny auto-denies client authorization grant requests
+ GrantHandlerDeny GrantHandlerType = "deny"
+)
+
+// EtcdConfig holds the necessary configuration options for connecting with an etcd database
+type EtcdConfig struct {
+ // ServingInfo describes how to start serving the etcd master
+ ServingInfo ServingInfo `json:"servingInfo"`
+ // Address is the advertised host:port for client connections to etcd
+ Address string `json:"address"`
+ // PeerServingInfo describes how to start serving the etcd peer
+ PeerServingInfo ServingInfo `json:"peerServingInfo"`
+ // PeerAddress is the advertised host:port for peer connections to etcd
+ PeerAddress string `json:"peerAddress"`
+
+ // StorageDir is the path to the etcd storage directory
+ StorageDir string `json:"storageDirectory"`
+}
+
+// KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master
+type KubernetesMasterConfig struct {
+ // APILevels is a list of API levels that should be enabled on startup: v1 as examples
+ APILevels []string `json:"apiLevels"`
+ // DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.
+ DisabledAPIGroupVersions map[string][]string `json:"disabledAPIGroupVersions"`
+
+ // MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.
+ MasterIP string `json:"masterIP"`
+ // MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked
+ // at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to
+ // reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and
+ // out of the kubernetes service record. It is not recommended to set this value below 15s.
+ MasterEndpointReconcileTTL int `json:"masterEndpointReconcileTTL"`
+ // ServicesSubnet is the subnet to use for assigning service IPs
+ ServicesSubnet string `json:"servicesSubnet"`
+ // ServicesNodePortRange is the range to use for assigning service public ports on a host.
+ ServicesNodePortRange string `json:"servicesNodePortRange"`
+
+ // SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.
+ SchedulerConfigFile string `json:"schedulerConfigFile"`
+
+ // PodEvictionTimeout controls grace period for deleting pods on failed nodes.
+ // It takes valid time duration string. If empty, you get the default pod eviction timeout.
+ PodEvictionTimeout string `json:"podEvictionTimeout"`
+ // ProxyClientInfo specifies the client cert/key to use when proxying to pods
+ ProxyClientInfo CertInfo `json:"proxyClientInfo"`
+
+ // APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's
+ // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not
+ // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.
+ APIServerArguments ExtendedArguments `json:"apiServerArguments"`
+ // ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the
+ // controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist
+ // the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid
+ // configurations.
+ ControllerArguments ExtendedArguments `json:"controllerArguments"`
+ // SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's
+ // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not
+ // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.
+ SchedulerArguments ExtendedArguments `json:"schedulerArguments"`
+}
+
+// CertInfo relates a certificate with a private key
+type CertInfo struct {
+ // CertFile is a file containing a PEM-encoded certificate
+ CertFile string `json:"certFile"`
+ // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile
+ KeyFile string `json:"keyFile"`
+}
+
+// PodManifestConfig holds the necessary configuration options for using pod manifests
+type PodManifestConfig struct {
+ // Path specifies the path for the pod manifest file or directory
+ // If its a directory, its expected to contain on or more manifest files
+ // This is used by the Kubelet to create pods on the node
+ Path string `json:"path"`
+ // FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data
+ // The interval needs to be a positive value
+ FileCheckIntervalSeconds int64 `json:"fileCheckIntervalSeconds"`
+}
+
+// StringSource allows specifying a string inline, or externally via env var or file.
+// When it contains only a string value, it marshals to a simple JSON string.
+type StringSource struct {
+ // StringSourceSpec specifies the string value, or external location
+ StringSourceSpec `json:",inline"`
+}
+
+// StringSourceSpec specifies a string value, or external location
+type StringSourceSpec struct {
+ // Value specifies the cleartext value, or an encrypted value if keyFile is specified.
+ Value string `json:"value"`
+
+ // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
+ Env string `json:"env"`
+
+ // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
+ File string `json:"file"`
+
+ // KeyFile references a file containing the key to use to decrypt the value.
+ KeyFile string `json:"keyFile"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type LDAPSyncConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ // Host is the scheme, host and port of the LDAP server to connect to:
+ // scheme://host:port
+ URL string `json:"url"`
+ // BindDN is an optional DN to bind to the LDAP server with
+ BindDN string `json:"bindDN"`
+ // BindPassword is an optional password to bind with during the search phase.
+ BindPassword StringSource `json:"bindPassword"`
+
+ // Insecure, if true, indicates the connection should not use TLS.
+ // Cannot be set to true with a URL scheme of "ldaps://"
+ // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830
+ Insecure bool `json:"insecure"`
+ // CA is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+
+ // LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to
+ // OpenShift Group names
+ LDAPGroupUIDToOpenShiftGroupNameMapping map[string]string `json:"groupUIDNameMapping"`
+
+ // RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion
+ // similar to RFC2307: first-class group and user entries, with group membership determined by a
+ // multi-valued attribute on the group entry listing its members
+ RFC2307Config *RFC2307Config `json:"rfc2307,omitempty"`
+
+ // ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a
+ // fashion similar to that used in Active Directory: first-class user entries, with group membership
+ // determined by a multi-valued attribute on members listing groups they are a member of
+ ActiveDirectoryConfig *ActiveDirectoryConfig `json:"activeDirectory,omitempty"`
+
+ // AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server
+ // set up in a fashion similar to that used in Active Directory as described above, with one addition:
+ // first-class group entries exist and are used to hold metadata but not group membership
+ AugmentedActiveDirectoryConfig *AugmentedActiveDirectoryConfig `json:"augmentedActiveDirectory,omitempty"`
+}
+
+// RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP
+// server using the RFC2307 schema
+type RFC2307Config struct {
+ // AllGroupsQuery holds the template for an LDAP query that returns group entries.
+ AllGroupsQuery LDAPQuery `json:"groupsQuery"`
+
+ // GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier.
+ // (ldapGroupUID)
+ GroupUIDAttribute string `json:"groupUIDAttribute"`
+
+ // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for
+ // an OpenShift group
+ GroupNameAttributes []string `json:"groupNameAttributes"`
+
+ // GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members.
+ // The values contained in those attributes must be queryable by your UserUIDAttribute
+ GroupMembershipAttributes []string `json:"groupMembershipAttributes"`
+
+ // AllUsersQuery holds the template for an LDAP query that returns user entries.
+ AllUsersQuery LDAPQuery `json:"usersQuery"`
+
+ // UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier.
+ // It must correspond to values that will be found from the GroupMembershipAttributes
+ UserUIDAttribute string `json:"userUIDAttribute"`
+
+ // UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name.
+ // The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider
+ UserNameAttributes []string `json:"userNameAttributes"`
+
+ // TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are
+ // encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only
+ // and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find
+ // any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause
+ // group membership to be removed, so it is recommended to use this flag with caution.
+ TolerateMemberNotFoundErrors bool `json:"tolerateMemberNotFoundErrors"`
+
+ // TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries
+ // are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all
+ // user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail
+ // if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP
+ // sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use
+ // this flag with caution.
+ TolerateMemberOutOfScopeErrors bool `json:"tolerateMemberOutOfScopeErrors"`
+}
+
+// ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP
+// server using the Active Directory schema
+type ActiveDirectoryConfig struct {
+ // AllUsersQuery holds the template for an LDAP query that returns user entries.
+ AllUsersQuery LDAPQuery `json:"usersQuery"`
+
+ // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.
+ UserNameAttributes []string `json:"userNameAttributes"`
+
+ // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted
+ // as the groups it is a member of
+ GroupMembershipAttributes []string `json:"groupMembershipAttributes"`
+}
+
+// AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP
+// server using the augmented Active Directory schema
+type AugmentedActiveDirectoryConfig struct {
+ // AllUsersQuery holds the template for an LDAP query that returns user entries.
+ AllUsersQuery LDAPQuery `json:"usersQuery"`
+
+ // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.
+ UserNameAttributes []string `json:"userNameAttributes"`
+
+ // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted
+ // as the groups it is a member of
+ GroupMembershipAttributes []string `json:"groupMembershipAttributes"`
+
+ // AllGroupsQuery holds the template for an LDAP query that returns group entries.
+ AllGroupsQuery LDAPQuery `json:"groupsQuery"`
+
+ // GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier.
+ // (ldapGroupUID)
+ GroupUIDAttribute string `json:"groupUIDAttribute"`
+
+ // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for
+ // an OpenShift group
+ GroupNameAttributes []string `json:"groupNameAttributes"`
+}
+
+// LDAPQuery holds the options necessary to build an LDAP query
+type LDAPQuery struct {
+ // The DN of the branch of the directory where all searches should start from
+ BaseDN string `json:"baseDN"`
+
+ // The (optional) scope of the search. Can be:
+ // base: only the base object,
+ // one: all object on the base level,
+ // sub: the entire subtree
+ // Defaults to the entire subtree if not set
+ Scope string `json:"scope"`
+
+ // The (optional) behavior of the search with regards to alisases. Can be:
+ // never: never dereference aliases,
+ // search: only dereference in searching,
+ // base: only dereference in finding the base object,
+ // always: always dereference
+ // Defaults to always dereferencing if not set
+ DerefAliases string `json:"derefAliases"`
+
+ // TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding
+ // before the wait for a response is given up. If this is 0, no client-side limit is imposed
+ TimeLimit int `json:"timeout"`
+
+ // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN
+ Filter string `json:"filter"`
+
+ // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.
+ PageSize int `json:"pageSize"`
+}
+
+// AdmissionPluginConfig holds the necessary configuration options for admission plugins
+type AdmissionPluginConfig struct {
+ // Location is the path to a configuration file that contains the plugin's
+ // configuration
+ Location string `json:"location"`
+
+ // Configuration is an embedded configuration object to be used as the plugin's
+ // configuration. If present, it will be used instead of the path to the configuration file.
+ Configuration runtime.RawExtension `json:"configuration"`
+}
+
+// AdmissionConfig holds the necessary configuration options for admission
+type AdmissionConfig struct {
+ // PluginConfig allows specifying a configuration file per admission control plugin
+ PluginConfig map[string]*AdmissionPluginConfig `json:"pluginConfig"`
+
+ // PluginOrderOverride is a list of admission control plugin names that will be installed
+ // on the master. Order is significant. If empty, a default list of plugins is used.
+ PluginOrderOverride []string `json:"pluginOrderOverride,omitempty"`
+}
+
+// ControllerConfig holds configuration values for controllers
+type ControllerConfig struct {
+ // Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
+ // named 'foo', '-foo' disables the controller named 'foo'.
+ // Defaults to "*".
+ Controllers []string `json:"controllers"`
+ // Election defines the configuration for electing a controller instance to make changes to
+ // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the
+ // legacy direct etcd election code will be used.
+ Election *ControllerElectionConfig `json:"election"`
+ // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+ // pods fulfilling a service to serve with.
+ ServiceServingCert ServiceServingCert `json:"serviceServingCert"`
+}
+
+// ControllerElectionConfig contains configuration values for deciding how a controller
+// will be elected to act as leader.
+type ControllerElectionConfig struct {
+ // LockName is the resource name used to act as the lock for determining which controller
+ // instance should lead.
+ LockName string `json:"lockName"`
+ // LockNamespace is the resource namespace used to act as the lock for determining which
+ // controller instance should lead. It defaults to "kube-system"
+ LockNamespace string `json:"lockNamespace"`
+ // LockResource is the group and resource name to use to coordinate for the controller lock.
+ // If unset, defaults to "configmaps".
+ LockResource GroupResource `json:"lockResource"`
+}
+
+// GroupResource points to a resource by its name and API group.
+type GroupResource struct {
+ // Group is the name of an API group
+ Group string `json:"group"`
+ // Resource is the name of a resource.
+ Resource string `json:"resource"`
+}
+
+// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+// pods fulfilling a service to serve with.
+type ServiceServingCert struct {
+ // Signer holds the signing information used to automatically sign serving certificates.
+ // If this value is nil, then certs are not signed automatically.
+ Signer *CertInfo `json:"signer"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DefaultAdmissionConfig can be used to enable or disable various admission plugins.
+// When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it,
+// this will cause an "off by default" admission plugin to be enabled
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type DefaultAdmissionConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Disable turns off an admission plugin that is enabled by default.
+ Disable bool `json:"disable"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildDefaultsConfig controls the default information for Builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildDefaultsConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // gitHTTPProxy is the location of the HTTPProxy for Git source
+ GitHTTPProxy string `json:"gitHTTPProxy,omitempty"`
+
+ // gitHTTPSProxy is the location of the HTTPSProxy for Git source
+ GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"`
+
+ // gitNoProxy is the list of domains for which the proxy should not be used
+ GitNoProxy string `json:"gitNoProxy,omitempty"`
+
+ // env is a set of default environment variables that will be applied to the
+ // build if the specified variables do not exist on the build
+ Env []corev1.EnvVar `json:"env,omitempty"`
+
+ // sourceStrategyDefaults are default values that apply to builds using the
+ // source strategy.
+ SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"`
+
+ // imageLabels is a list of labels that are applied to the resulting image.
+ // User can override a default label by providing a label with the same name in their
+ // Build/BuildConfig.
+ ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // annotations are annotations that will be added to the build pod
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // resources defines resource requirements to execute the build.
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// SourceStrategyDefaultsConfig contains values that apply to builds using the
+// source strategy.
+type SourceStrategyDefaultsConfig struct {
+ // incremental indicates if s2i build strategies should perform an incremental
+ // build or not
+ Incremental *bool `json:"incremental,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildOverridesConfig controls override settings for builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildOverridesConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // forcePull indicates whether the build strategy should always be set to ForcePull=true
+ ForcePull bool `json:"forcePull"`
+
+ // imageLabels is a list of labels that are applied to the resulting image.
+ // If user provided a label in their Build/BuildConfig with the same name as one in this
+ // list, the user's label will be overwritten.
+ ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // annotations are annotations that will be added to the build pod
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // tolerations is a list of Tolerations that will override any existing
+ // tolerations set on a build pod.
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..17d717ea45
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go
@@ -0,0 +1,2143 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ buildv1 "github.com/openshift/api/build/v1"
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ActiveDirectoryConfig) DeepCopyInto(out *ActiveDirectoryConfig) {
+ *out = *in
+ out.AllUsersQuery = in.AllUsersQuery
+ if in.UserNameAttributes != nil {
+ in, out := &in.UserNameAttributes, &out.UserNameAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupMembershipAttributes != nil {
+ in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfig.
+func (in *ActiveDirectoryConfig) DeepCopy() *ActiveDirectoryConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ActiveDirectoryConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
+ *out = *in
+ if in.PluginConfig != nil {
+ in, out := &in.PluginConfig, &out.PluginConfig
+ *out = make(map[string]*AdmissionPluginConfig, len(*in))
+ for key, val := range *in {
+ var outVal *AdmissionPluginConfig
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = new(AdmissionPluginConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.PluginOrderOverride != nil {
+ in, out := &in.PluginOrderOverride, &out.PluginOrderOverride
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig.
+func (in *AdmissionConfig) DeepCopy() *AdmissionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) {
+ *out = *in
+ in.Configuration.DeepCopyInto(&out.Configuration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig.
+func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionPluginConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) {
+ *out = *in
+ out.ProxyClientInfo = in.ProxyClientInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig.
+func (in *AggregatorConfig) DeepCopy() *AggregatorConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AggregatorConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider.
+func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(AllowAllPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) {
+ {
+ in := &in
+ *out = make(AllowedRegistries, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries.
+func (in AllowedRegistries) DeepCopy() AllowedRegistries {
+ if in == nil {
+ return nil
+ }
+ out := new(AllowedRegistries)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditConfig) DeepCopyInto(out *AuditConfig) {
+ *out = *in
+ in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig.
+func (in *AuditConfig) DeepCopy() *AuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AugmentedActiveDirectoryConfig) DeepCopyInto(out *AugmentedActiveDirectoryConfig) {
+ *out = *in
+ out.AllUsersQuery = in.AllUsersQuery
+ if in.UserNameAttributes != nil {
+ in, out := &in.UserNameAttributes, &out.UserNameAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupMembershipAttributes != nil {
+ in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.AllGroupsQuery = in.AllGroupsQuery
+ if in.GroupNameAttributes != nil {
+ in, out := &in.GroupNameAttributes, &out.GroupNameAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AugmentedActiveDirectoryConfig.
+func (in *AugmentedActiveDirectoryConfig) DeepCopy() *AugmentedActiveDirectoryConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AugmentedActiveDirectoryConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.RemoteConnectionInfo = in.RemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicAuthPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SourceStrategyDefaults != nil {
+ in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults
+ *out = new(SourceStrategyDefaultsConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]buildv1.ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig.
+func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildDefaultsConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]buildv1.ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig.
+func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildOverridesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertInfo) DeepCopyInto(out *CertInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo.
+func (in *CertInfo) DeepCopy() *CertInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(CertInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides.
+func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionOverrides)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) {
+ *out = *in
+ if in.Controllers != nil {
+ in, out := &in.Controllers, &out.Controllers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Election != nil {
+ in, out := &in.Election, &out.Election
+ *out = new(ControllerElectionConfig)
+ **out = **in
+ }
+ in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfig.
+func (in *ControllerConfig) DeepCopy() *ControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerElectionConfig) DeepCopyInto(out *ControllerElectionConfig) {
+ *out = *in
+ out.LockResource = in.LockResource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerElectionConfig.
+func (in *ControllerElectionConfig) DeepCopy() *ControllerElectionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerElectionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSConfig) DeepCopyInto(out *DNSConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfig.
+func (in *DNSConfig) DeepCopy() *DNSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultAdmissionConfig) DeepCopyInto(out *DefaultAdmissionConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAdmissionConfig.
+func (in *DefaultAdmissionConfig) DeepCopy() *DefaultAdmissionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DefaultAdmissionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DefaultAdmissionConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider.
+func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DenyAllPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig.
+func (in *DockerConfig) DeepCopy() *DockerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConfig) DeepCopyInto(out *EtcdConfig) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ in.PeerServingInfo.DeepCopyInto(&out.PeerServingInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConfig.
+func (in *EtcdConfig) DeepCopy() *EtcdConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) {
+ *out = *in
+ if in.URLs != nil {
+ in, out := &in.URLs, &out.URLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo.
+func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig.
+func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdStorageConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtendedArguments) DeepCopyInto(out *ExtendedArguments) {
+ {
+ in := &in
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedArguments.
+func (in ExtendedArguments) DeepCopy() ExtendedArguments {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtendedArguments)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in FeatureList) DeepCopyInto(out *FeatureList) {
+ {
+ in := &in
+ *out = make(FeatureList, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList.
+func (in FeatureList) DeepCopy() FeatureList {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureList)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.Organizations != nil {
+ in, out := &in.Organizations, &out.Organizations
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Teams != nil {
+ in, out := &in.Teams, &out.Teams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
+func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitHubIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.Legacy != nil {
+ in, out := &in.Legacy, &out.Legacy
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitLabIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GoogleIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrantConfig) DeepCopyInto(out *GrantConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig.
+func (in *GrantConfig) DeepCopy() *GrantConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GrantConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResource) DeepCopyInto(out *GroupResource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource.
+func (in *GroupResource) DeepCopy() *GroupResource {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(HTPasswdPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) {
+ *out = *in
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo.
+func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+ *out = *in
+ in.Provider.DeepCopyInto(&out.Provider)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageConfig) DeepCopyInto(out *ImageConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig.
+func (in *ImageConfig) DeepCopy() *ImageConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) {
+ *out = *in
+ if in.AllowedRegistriesForImport != nil {
+ in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
+ *out = new(AllowedRegistries)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]RegistryLocation, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig.
+func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) {
+ *out = *in
+ if in.AutoProvisionEnabled != nil {
+ in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig.
+func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(JenkinsPipelineConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.RemoteConnectionInfo = in.RemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider.
+func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(KeystonePasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo.
+func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeletConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubernetesMasterConfig) DeepCopyInto(out *KubernetesMasterConfig) {
+ *out = *in
+ if in.APILevels != nil {
+ in, out := &in.APILevels, &out.APILevels
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DisabledAPIGroupVersions != nil {
+ in, out := &in.DisabledAPIGroupVersions, &out.DisabledAPIGroupVersions
+ *out = make(map[string][]string, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ out.ProxyClientInfo = in.ProxyClientInfo
+ if in.APIServerArguments != nil {
+ in, out := &in.APIServerArguments, &out.APIServerArguments
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.ControllerArguments != nil {
+ in, out := &in.ControllerArguments, &out.ControllerArguments
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.SchedulerArguments != nil {
+ in, out := &in.SchedulerArguments, &out.SchedulerArguments
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMasterConfig.
+func (in *KubernetesMasterConfig) DeepCopy() *KubernetesMasterConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(KubernetesMasterConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
+func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPAttributeMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.BindPassword = in.BindPassword
+ in.Attributes.DeepCopyInto(&out.Attributes)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider.
+func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPQuery) DeepCopyInto(out *LDAPQuery) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPQuery.
+func (in *LDAPQuery) DeepCopy() *LDAPQuery {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPQuery)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPSyncConfig) DeepCopyInto(out *LDAPSyncConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.BindPassword = in.BindPassword
+ if in.LDAPGroupUIDToOpenShiftGroupNameMapping != nil {
+ in, out := &in.LDAPGroupUIDToOpenShiftGroupNameMapping, &out.LDAPGroupUIDToOpenShiftGroupNameMapping
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.RFC2307Config != nil {
+ in, out := &in.RFC2307Config, &out.RFC2307Config
+ *out = new(RFC2307Config)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ActiveDirectoryConfig != nil {
+ in, out := &in.ActiveDirectoryConfig, &out.ActiveDirectoryConfig
+ *out = new(ActiveDirectoryConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AugmentedActiveDirectoryConfig != nil {
+ in, out := &in.AugmentedActiveDirectoryConfig, &out.AugmentedActiveDirectoryConfig
+ *out = new(AugmentedActiveDirectoryConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPSyncConfig.
+func (in *LDAPSyncConfig) DeepCopy() *LDAPSyncConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPSyncConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LDAPSyncConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalQuota) DeepCopyInto(out *LocalQuota) {
+ *out = *in
+ if in.PerFSGroup != nil {
+ in, out := &in.PerFSGroup, &out.PerFSGroup
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalQuota.
+func (in *LocalQuota) DeepCopy() *LocalQuota {
+ if in == nil {
+ return nil
+ }
+ out := new(LocalQuota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) {
+ *out = *in
+ if in.RequestHeader != nil {
+ in, out := &in.RequestHeader, &out.RequestHeader
+ *out = new(RequestHeaderAuthenticationOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.WebhookTokenAuthenticators != nil {
+ in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators
+ *out = make([]WebhookTokenAuthenticator, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig.
+func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterAuthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterClients) DeepCopyInto(out *MasterClients) {
+ *out = *in
+ if in.OpenShiftLoopbackClientConnectionOverrides != nil {
+ in, out := &in.OpenShiftLoopbackClientConnectionOverrides, &out.OpenShiftLoopbackClientConnectionOverrides
+ *out = new(ClientConnectionOverrides)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterClients.
+func (in *MasterClients) DeepCopy() *MasterClients {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterClients)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterConfig) DeepCopyInto(out *MasterConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ in.AuthConfig.DeepCopyInto(&out.AuthConfig)
+ out.AggregatorConfig = in.AggregatorConfig
+ if in.CORSAllowedOrigins != nil {
+ in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.APILevels != nil {
+ in, out := &in.APILevels, &out.APILevels
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig)
+ in.ControllerConfig.DeepCopyInto(&out.ControllerConfig)
+ out.EtcdStorageConfig = in.EtcdStorageConfig
+ in.EtcdClientInfo.DeepCopyInto(&out.EtcdClientInfo)
+ out.KubeletClientInfo = in.KubeletClientInfo
+ in.KubernetesMasterConfig.DeepCopyInto(&out.KubernetesMasterConfig)
+ if in.EtcdConfig != nil {
+ in, out := &in.EtcdConfig, &out.EtcdConfig
+ *out = new(EtcdConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OAuthConfig != nil {
+ in, out := &in.OAuthConfig, &out.OAuthConfig
+ *out = new(OAuthConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DNSConfig != nil {
+ in, out := &in.DNSConfig, &out.DNSConfig
+ *out = new(DNSConfig)
+ **out = **in
+ }
+ in.ServiceAccountConfig.DeepCopyInto(&out.ServiceAccountConfig)
+ in.MasterClients.DeepCopyInto(&out.MasterClients)
+ out.ImageConfig = in.ImageConfig
+ in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig)
+ in.PolicyConfig.DeepCopyInto(&out.PolicyConfig)
+ in.ProjectConfig.DeepCopyInto(&out.ProjectConfig)
+ out.RoutingConfig = in.RoutingConfig
+ in.NetworkConfig.DeepCopyInto(&out.NetworkConfig)
+ in.VolumeConfig.DeepCopyInto(&out.VolumeConfig)
+ in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig)
+ in.AuditConfig.DeepCopyInto(&out.AuditConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfig.
+func (in *MasterConfig) DeepCopy() *MasterConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MasterConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterNetworkConfig) DeepCopyInto(out *MasterNetworkConfig) {
+ *out = *in
+ if in.ClusterNetworks != nil {
+ in, out := &in.ClusterNetworks, &out.ClusterNetworks
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalIPNetworkCIDRs != nil {
+ in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNetworkConfig.
+func (in *MasterNetworkConfig) DeepCopy() *MasterNetworkConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterNetworkConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterVolumeConfig) DeepCopyInto(out *MasterVolumeConfig) {
+ *out = *in
+ if in.DynamicProvisioningEnabled != nil {
+ in, out := &in.DynamicProvisioningEnabled, &out.DynamicProvisioningEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterVolumeConfig.
+func (in *MasterVolumeConfig) DeepCopy() *MasterVolumeConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterVolumeConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) {
+ *out = *in
+ if in.Names != nil {
+ in, out := &in.Names, &out.Names
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate.
+func (in *NamedCertificate) DeepCopy() *NamedCertificate {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedCertificate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAuthConfig) DeepCopyInto(out *NodeAuthConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAuthConfig.
+func (in *NodeAuthConfig) DeepCopy() *NodeAuthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeAuthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfig) DeepCopyInto(out *NodeConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ if in.MasterClientConnectionOverrides != nil {
+ in, out := &in.MasterClientConnectionOverrides, &out.MasterClientConnectionOverrides
+ *out = new(ClientConnectionOverrides)
+ **out = **in
+ }
+ if in.DNSNameservers != nil {
+ in, out := &in.DNSNameservers, &out.DNSNameservers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.NetworkConfig = in.NetworkConfig
+ out.ImageConfig = in.ImageConfig
+ if in.PodManifestConfig != nil {
+ in, out := &in.PodManifestConfig, &out.PodManifestConfig
+ *out = new(PodManifestConfig)
+ **out = **in
+ }
+ out.AuthConfig = in.AuthConfig
+ out.DockerConfig = in.DockerConfig
+ if in.KubeletArguments != nil {
+ in, out := &in.KubeletArguments, &out.KubeletArguments
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.ProxyArguments != nil {
+ in, out := &in.ProxyArguments, &out.ProxyArguments
+ *out = make(ExtendedArguments, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.EnableUnidling != nil {
+ in, out := &in.EnableUnidling, &out.EnableUnidling
+ *out = new(bool)
+ **out = **in
+ }
+ in.VolumeConfig.DeepCopyInto(&out.VolumeConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfig.
+func (in *NodeConfig) DeepCopy() *NodeConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeNetworkConfig) DeepCopyInto(out *NodeNetworkConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkConfig.
+func (in *NodeNetworkConfig) DeepCopy() *NodeNetworkConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeNetworkConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeVolumeConfig) DeepCopyInto(out *NodeVolumeConfig) {
+ *out = *in
+ in.LocalQuota.DeepCopyInto(&out.LocalQuota)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVolumeConfig.
+func (in *NodeVolumeConfig) DeepCopy() *NodeVolumeConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeVolumeConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) {
+ *out = *in
+ if in.MasterCA != nil {
+ in, out := &in.MasterCA, &out.MasterCA
+ *out = new(string)
+ **out = **in
+ }
+ if in.IdentityProviders != nil {
+ in, out := &in.IdentityProviders, &out.IdentityProviders
+ *out = make([]IdentityProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.GrantConfig = in.GrantConfig
+ if in.SessionConfig != nil {
+ in, out := &in.SessionConfig, &out.SessionConfig
+ *out = new(SessionConfig)
+ **out = **in
+ }
+ in.TokenConfig.DeepCopyInto(&out.TokenConfig)
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(OAuthTemplates)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig.
+func (in *OAuthConfig) DeepCopy() *OAuthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
+func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthTemplates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
+func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDClaims)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraAuthorizeParameters != nil {
+ in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.URLs = in.URLs
+ in.Claims.DeepCopyInto(&out.Claims)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs.
+func (in *OpenIDURLs) DeepCopy() *OpenIDURLs {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDURLs)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodManifestConfig) DeepCopyInto(out *PodManifestConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodManifestConfig.
+func (in *PodManifestConfig) DeepCopy() *PodManifestConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(PodManifestConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyConfig) DeepCopyInto(out *PolicyConfig) {
+ *out = *in
+ in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyConfig.
+func (in *PolicyConfig) DeepCopy() *PolicyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) {
+ *out = *in
+ if in.SecurityAllocator != nil {
+ in, out := &in.SecurityAllocator, &out.SecurityAllocator
+ *out = new(SecurityAllocator)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig.
+func (in *ProjectConfig) DeepCopy() *ProjectConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RFC2307Config) DeepCopyInto(out *RFC2307Config) {
+ *out = *in
+ out.AllGroupsQuery = in.AllGroupsQuery
+ if in.GroupNameAttributes != nil {
+ in, out := &in.GroupNameAttributes, &out.GroupNameAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupMembershipAttributes != nil {
+ in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.AllUsersQuery = in.AllUsersQuery
+ if in.UserNameAttributes != nil {
+ in, out := &in.UserNameAttributes, &out.UserNameAttributes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RFC2307Config.
+func (in *RFC2307Config) DeepCopy() *RFC2307Config {
+ if in == nil {
+ return nil
+ }
+ out := new(RFC2307Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
+func (in *RegistryLocation) DeepCopy() *RegistryLocation {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryLocation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo.
+func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteConnectionInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) {
+ *out = *in
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameHeaders != nil {
+ in, out := &in.UsernameHeaders, &out.UsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupHeaders != nil {
+ in, out := &in.GroupHeaders, &out.GroupHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraHeaderPrefixes != nil {
+ in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions.
+func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderAuthenticationOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsernameHeaders != nil {
+ in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NameHeaders != nil {
+ in, out := &in.NameHeaders, &out.NameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EmailHeaders != nil {
+ in, out := &in.EmailHeaders, &out.EmailHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
+func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig.
+func (in *RoutingConfig) DeepCopy() *RoutingConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutingConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator.
+func (in *SecurityAllocator) DeepCopy() *SecurityAllocator {
+ if in == nil {
+ return nil
+ }
+ out := new(SecurityAllocator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) {
+ *out = *in
+ if in.ManagedNames != nil {
+ in, out := &in.ManagedNames, &out.ManagedNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PublicKeyFiles != nil {
+ in, out := &in.PublicKeyFiles, &out.PublicKeyFiles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig.
+func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) {
+ *out = *in
+ if in.Signer != nil {
+ in, out := &in.Signer, &out.Signer
+ *out = new(CertInfo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert.
+func (in *ServiceServingCert) DeepCopy() *ServiceServingCert {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceServingCert)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServingInfo) DeepCopyInto(out *ServingInfo) {
+ *out = *in
+ out.CertInfo = in.CertInfo
+ if in.NamedCertificates != nil {
+ in, out := &in.NamedCertificates, &out.NamedCertificates
+ *out = make([]NamedCertificate, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CipherSuites != nil {
+ in, out := &in.CipherSuites, &out.CipherSuites
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo.
+func (in *ServingInfo) DeepCopy() *ServingInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ServingInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig.
+func (in *SessionConfig) DeepCopy() *SessionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionSecret) DeepCopyInto(out *SessionSecret) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret.
+func (in *SessionSecret) DeepCopy() *SessionSecret {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionSecret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]SessionSecret, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets.
+func (in *SessionSecrets) DeepCopy() *SessionSecrets {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionSecrets)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SessionSecrets) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) {
+ *out = *in
+ if in.Incremental != nil {
+ in, out := &in.Incremental, &out.Incremental
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig.
+func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceStrategyDefaultsConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSource) DeepCopyInto(out *StringSource) {
+ *out = *in
+ out.StringSourceSpec = in.StringSourceSpec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource.
+func (in *StringSource) DeepCopy() *StringSource {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec.
+func (in *StringSourceSpec) DeepCopy() *StringSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StringSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
+ *out = *in
+ if in.AccessTokenInactivityTimeoutSeconds != nil {
+ in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
+func (in *TokenConfig) DeepCopy() *TokenConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) {
+ *out = *in
+ in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule.
+func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentDenyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) {
+ *out = *in
+ if in.HTTPVerbs != nil {
+ in, out := &in.HTTPVerbs, &out.HTTPVerbs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule.
+func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentMatchRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) {
+ *out = *in
+ if in.RequiredClients != nil {
+ in, out := &in.RequiredClients, &out.RequiredClients
+ *out = make([]UserAgentMatchRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DeniedClients != nil {
+ in, out := &in.DeniedClients, &out.DeniedClients
+ *out = make([]UserAgentDenyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig.
+func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(UserAgentMatchingConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+ if in == nil {
+ return nil
+ }
+ out := new(WebhookTokenAuthenticator)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..daa0868b6b
--- /dev/null
+++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,977 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ActiveDirectoryConfig = map[string]string{
+ "": "ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the Active Directory schema",
+ "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.",
+ "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.",
+ "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of",
+}
+
+func (ActiveDirectoryConfig) SwaggerDoc() map[string]string {
+ return map_ActiveDirectoryConfig
+}
+
+var map_AdmissionConfig = map[string]string{
+ "": "AdmissionConfig holds the necessary configuration options for admission",
+ "pluginConfig": "PluginConfig allows specifying a configuration file per admission control plugin",
+ "pluginOrderOverride": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.",
+}
+
+func (AdmissionConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionConfig
+}
+
+var map_AdmissionPluginConfig = map[string]string{
+ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
+ "location": "Location is the path to a configuration file that contains the plugin's configuration",
+ "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+}
+
+func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
+ return map_AdmissionPluginConfig
+}
+
+var map_AggregatorConfig = map[string]string{
+ "": "AggregatorConfig holds information required to make the aggregator function.",
+ "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers",
+}
+
+func (AggregatorConfig) SwaggerDoc() map[string]string {
+ return map_AggregatorConfig
+}
+
+var map_AllowAllPasswordIdentityProvider = map[string]string{
+ "": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_AllowAllPasswordIdentityProvider
+}
+
+var map_AuditConfig = map[string]string{
+ "": "AuditConfig holds configuration for the audit capabilities",
+ "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.",
+ "auditFilePath": "All requests coming to the apiserver will be logged to this file.",
+ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
+ "maximumRetainedFiles": "Maximum number of old log files to retain.",
+ "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
+ "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.",
+ "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+ "logFormat": "Format of saved audits (legacy or json).",
+ "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
+ "webHookMode": "Strategy for sending audit events (block or batch).",
+}
+
+func (AuditConfig) SwaggerDoc() map[string]string {
+ return map_AuditConfig
+}
+
+var map_AugmentedActiveDirectoryConfig = map[string]string{
+ "": "AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the augmented Active Directory schema",
+ "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.",
+ "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.",
+ "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of",
+ "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.",
+ "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)",
+ "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group",
+}
+
+func (AugmentedActiveDirectoryConfig) SwaggerDoc() map[string]string {
+ return map_AugmentedActiveDirectoryConfig
+}
+
+var map_BasicAuthPasswordIdentityProvider = map[string]string{
+ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_BasicAuthPasswordIdentityProvider
+}
+
+var map_BuildDefaultsConfig = map[string]string{
+ "": "BuildDefaultsConfig controls the default information for Builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source",
+ "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source",
+ "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used",
+ "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+ "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.",
+ "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+ "annotations": "annotations are annotations that will be added to the build pod",
+ "resources": "resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaultsConfig) SwaggerDoc() map[string]string {
+ return map_BuildDefaultsConfig
+}
+
+var map_BuildOverridesConfig = map[string]string{
+ "": "BuildOverridesConfig controls override settings for builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "forcePull": "forcePull indicates whether the build strategy should always be set to ForcePull=true",
+ "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+ "annotations": "annotations are annotations that will be added to the build pod",
+ "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+}
+
+func (BuildOverridesConfig) SwaggerDoc() map[string]string {
+ return map_BuildOverridesConfig
+}
+
+var map_CertInfo = map[string]string{
+ "": "CertInfo relates a certificate with a private key",
+ "certFile": "CertFile is a file containing a PEM-encoded certificate",
+ "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+}
+
+func (CertInfo) SwaggerDoc() map[string]string {
+ return map_CertInfo
+}
+
+var map_ClientConnectionOverrides = map[string]string{
+ "": "ClientConnectionOverrides are a set of overrides to the default client connection settings.",
+ "acceptContentTypes": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.",
+ "contentType": "ContentType is the content type used when sending data to the server from this client.",
+ "qps": "QPS controls the number of queries per second allowed for this connection.",
+ "burst": "Burst allows extra queries to accumulate when a client is exceeding its rate.",
+}
+
+func (ClientConnectionOverrides) SwaggerDoc() map[string]string {
+ return map_ClientConnectionOverrides
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.",
+ "cidr": "CIDR defines the total range of a cluster networks address space.",
+ "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_ControllerConfig = map[string]string{
+ "": "ControllerConfig holds configuration values for controllers",
+ "controllers": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".",
+ "election": "Election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.",
+ "serviceServingCert": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+}
+
+func (ControllerConfig) SwaggerDoc() map[string]string {
+ return map_ControllerConfig
+}
+
+var map_ControllerElectionConfig = map[string]string{
+ "": "ControllerElectionConfig contains configuration values for deciding how a controller will be elected to act as leader.",
+ "lockName": "LockName is the resource name used to act as the lock for determining which controller instance should lead.",
+ "lockNamespace": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"",
+ "lockResource": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".",
+}
+
+func (ControllerElectionConfig) SwaggerDoc() map[string]string {
+ return map_ControllerElectionConfig
+}
+
+var map_DNSConfig = map[string]string{
+ "": "DNSConfig holds the necessary configuration options for DNS",
+ "bindAddress": "BindAddress is the ip:port to serve DNS on",
+ "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+ "allowRecursiveQueries": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.",
+}
+
+func (DNSConfig) SwaggerDoc() map[string]string {
+ return map_DNSConfig
+}
+
+var map_DefaultAdmissionConfig = map[string]string{
+ "": "DefaultAdmissionConfig can be used to enable or disable various admission plugins. When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, this will cause an \"off by default\" admission plugin to be enabled\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "disable": "Disable turns off an admission plugin that is enabled by default.",
+}
+
+func (DefaultAdmissionConfig) SwaggerDoc() map[string]string {
+ return map_DefaultAdmissionConfig
+}
+
+var map_DenyAllPasswordIdentityProvider = map[string]string{
+ "": "DenyAllPasswordIdentityProvider provides no identities for users\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_DenyAllPasswordIdentityProvider
+}
+
+var map_DockerConfig = map[string]string{
+ "": "DockerConfig holds Docker related configuration options.",
+ "execHandlerName": "ExecHandlerName is the name of the handler to use for executing commands in containers.",
+ "dockerShimSocket": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'",
+ "dockerShimRootDirectory": "DockershimRootDirectory is the dockershim root directory.",
+}
+
+func (DockerConfig) SwaggerDoc() map[string]string {
+ return map_DockerConfig
+}
+
+var map_EtcdConfig = map[string]string{
+ "": "EtcdConfig holds the necessary configuration options for connecting with an etcd database",
+ "servingInfo": "ServingInfo describes how to start serving the etcd master",
+ "address": "Address is the advertised host:port for client connections to etcd",
+ "peerServingInfo": "PeerServingInfo describes how to start serving the etcd peer",
+ "peerAddress": "PeerAddress is the advertised host:port for peer connections to etcd",
+ "storageDirectory": "StorageDir is the path to the etcd storage directory",
+}
+
+func (EtcdConfig) SwaggerDoc() map[string]string {
+ return map_EtcdConfig
+}
+
+var map_EtcdConnectionInfo = map[string]string{
+ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server",
+ "urls": "URLs are the URLs for etcd",
+ "ca": "CA is a file containing trusted roots for the etcd server certificates",
+}
+
+func (EtcdConnectionInfo) SwaggerDoc() map[string]string {
+ return map_EtcdConnectionInfo
+}
+
+var map_EtcdStorageConfig = map[string]string{
+ "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes",
+ "kubernetesStorageVersion": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.",
+ "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.",
+ "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.",
+ "openShiftStoragePrefix": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.",
+}
+
+func (EtcdStorageConfig) SwaggerDoc() map[string]string {
+ return map_EtcdStorageConfig
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "clientID": "ClientID is the oauth client ID",
+ "clientSecret": "ClientSecret is the oauth client secret",
+ "organizations": "Organizations optionally restricts which organizations are allowed to log in",
+ "teams": "Teams optionally restricts which teams are allowed to log in. Format is /.",
+ "hostname": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.",
+ "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
+ "url": "URL is the oauth server base URL",
+ "clientID": "ClientID is the oauth client ID",
+ "clientSecret": "ClientSecret is the oauth client secret",
+ "legacy": "Legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used",
+}
+
+func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitLabIdentityProvider
+}
+
+var map_GoogleIdentityProvider = map[string]string{
+ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "clientID": "ClientID is the oauth client ID",
+ "clientSecret": "ClientSecret is the oauth client secret",
+ "hostedDomain": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
+}
+
+func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GoogleIdentityProvider
+}
+
+var map_GrantConfig = map[string]string{
+ "": "GrantConfig holds the necessary configuration options for grant handlers",
+ "method": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients",
+ "serviceAccountMethod": "ServiceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt",
+}
+
+func (GrantConfig) SwaggerDoc() map[string]string {
+ return map_GrantConfig
+}
+
+var map_GroupResource = map[string]string{
+ "": "GroupResource points to a resource by its name and API group.",
+ "group": "Group is the name of an API group",
+ "resource": "Resource is the name of a resource.",
+}
+
+func (GroupResource) SwaggerDoc() map[string]string {
+ return map_GroupResource
+}
+
+var map_HTPasswdPasswordIdentityProvider = map[string]string{
+ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "file": "File is a reference to your htpasswd file",
+}
+
+func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_HTPasswdPasswordIdentityProvider
+}
+
+var map_HTTPServingInfo = map[string]string{
+ "": "HTTPServingInfo holds configuration for serving HTTP",
+ "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.",
+ "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.",
+}
+
+func (HTTPServingInfo) SwaggerDoc() map[string]string {
+ return map_HTTPServingInfo
+}
+
+var map_IdentityProvider = map[string]string{
+ "": "IdentityProvider provides identities for users authenticating using credentials",
+ "name": "Name is used to qualify the identities returned by this provider",
+ "challenge": "UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider",
+ "login": "UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against",
+ "mappingMethod": "MappingMethod determines how identities from this provider are mapped to users",
+ "provider": "Provider contains the information about how to set up a specific identity provider",
+}
+
+func (IdentityProvider) SwaggerDoc() map[string]string {
+ return map_IdentityProvider
+}
+
+var map_ImageConfig = map[string]string{
+ "": "ImageConfig holds the necessary configuration options for building image names for system components",
+ "format": "Format is the format of the name to be built for the system component",
+ "latest": "Latest determines if the latest tag will be pulled from the registry",
+}
+
+func (ImageConfig) SwaggerDoc() map[string]string {
+ return map_ImageConfig
+}
+
+var map_ImagePolicyConfig = map[string]string{
+ "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images",
+ "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.",
+ "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.",
+ "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.",
+ "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.",
+ "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "internalRegistryHostname": "InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.",
+ "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+ "additionalTrustedCA": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.",
+}
+
+func (ImagePolicyConfig) SwaggerDoc() map[string]string {
+ return map_ImagePolicyConfig
+}
+
+var map_JenkinsPipelineConfig = map[string]string{
+ "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy",
+ "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.",
+ "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored",
+ "templateName": "TemplateName is the name of the default Jenkins template",
+ "serviceName": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.",
+ "parameters": "Parameters specifies a set of optional parameters to the Jenkins template.",
+}
+
+func (JenkinsPipelineConfig) SwaggerDoc() map[string]string {
+ return map_JenkinsPipelineConfig
+}
+
+var map_KeystonePasswordIdentityProvider = map[string]string{
+ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "domainName": "Domain Name is required for keystone v3",
+ "useKeystoneIdentity": "UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username",
+}
+
+func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_KeystonePasswordIdentityProvider
+}
+
+var map_KubeletConnectionInfo = map[string]string{
+ "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet",
+ "port": "Port is the port to connect to kubelets on",
+ "ca": "CA is the CA for verifying TLS connections to kubelets",
+}
+
+func (KubeletConnectionInfo) SwaggerDoc() map[string]string {
+ return map_KubeletConnectionInfo
+}
+
+var map_KubernetesMasterConfig = map[string]string{
+ "": "KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master",
+ "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples",
+ "disabledAPIGroupVersions": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.",
+ "masterIP": "MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.",
+ "masterEndpointReconcileTTL": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.",
+ "servicesSubnet": "ServicesSubnet is the subnet to use for assigning service IPs",
+ "servicesNodePortRange": "ServicesNodePortRange is the range to use for assigning service public ports on a host.",
+ "schedulerConfigFile": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.",
+ "podEvictionTimeout": "PodEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. If empty, you get the default pod eviction timeout.",
+ "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to pods",
+ "apiServerArguments": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.",
+ "controllerArguments": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.",
+ "schedulerArguments": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.",
+}
+
+func (KubernetesMasterConfig) SwaggerDoc() map[string]string {
+ return map_KubernetesMasterConfig
+}
+
+var map_LDAPAttributeMapping = map[string]string{
+ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
+ "id": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"",
+ "preferredUsername": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
+ "name": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
+ "email": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+}
+
+func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
+ return map_LDAPAttributeMapping
+}
+
// map_LDAPPasswordIdentityProvider holds the generated swagger docs for
// LDAPPasswordIdentityProvider fields; the "" key is the type-level description.
var map_LDAPPasswordIdentityProvider = map[string]string{
	"": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"url": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter",
	"bindDN": "BindDN is an optional DN to bind with during the search phase.",
	"bindPassword": "BindPassword is an optional password to bind with during the search phase.",
	"insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830",
	"ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
	"attributes": "Attributes maps LDAP attributes to identities",
}

// SwaggerDoc returns the generated swagger docs for LDAPPasswordIdentityProvider.
func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string {
	return map_LDAPPasswordIdentityProvider
}
+
// map_LDAPQuery holds the generated swagger docs for LDAPQuery fields; the ""
// key is the type-level description.
// NOTE(review): typo fixes below ("alisases" -> "aliases", "all object" ->
// "all objects") must also be applied to the doc comments on the source type,
// otherwise regeneration will reintroduce them.
var map_LDAPQuery = map[string]string{
	"": "LDAPQuery holds the options necessary to build an LDAP query",
	"baseDN": "The DN of the branch of the directory where all searches should start from",
	"scope": "The (optional) scope of the search. Can be: base: only the base object, one: all objects on the base level, sub: the entire subtree Defaults to the entire subtree if not set",
	"derefAliases": "The (optional) behavior of the search with regards to aliases. Can be: never: never dereference aliases, search: only dereference in searching, base: only dereference in finding the base object, always: always dereference Defaults to always dereferencing if not set",
	"timeout": "TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. If this is 0, no client-side limit is imposed",
	"filter": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN",
	"pageSize": "PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.",
}
+
// SwaggerDoc returns the generated swagger docs for LDAPQuery.
func (LDAPQuery) SwaggerDoc() map[string]string {
	return map_LDAPQuery
}
+
// map_LDAPSyncConfig holds the generated swagger docs for LDAPSyncConfig
// fields; the "" key is the type-level description.
var map_LDAPSyncConfig = map[string]string{
	"": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port",
	"bindDN": "BindDN is an optional DN to bind to the LDAP server with",
	"bindPassword": "BindPassword is an optional password to bind with during the search phase.",
	"insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830",
	"ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
	"groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names",
	"rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members",
	"activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of",
	"augmentedActiveDirectory": "AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory as described above, with one addition: first-class group entries exist and are used to hold metadata but not group membership",
}

// SwaggerDoc returns the generated swagger docs for LDAPSyncConfig.
func (LDAPSyncConfig) SwaggerDoc() map[string]string {
	return map_LDAPSyncConfig
}
+
// map_LocalQuota holds the generated swagger docs for LocalQuota fields; the
// "" key is the type-level description.
var map_LocalQuota = map[string]string{
	"": "LocalQuota contains options for controlling local volume quota on the node.",
	"perFSGroup": "FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. At present this is only implemented for emptyDir volumes, and if the underlying volumeDirectory is on an XFS filesystem.",
}

// SwaggerDoc returns the generated swagger docs for LocalQuota.
func (LocalQuota) SwaggerDoc() map[string]string {
	return map_LocalQuota
}
+
// map_MasterAuthConfig holds the generated swagger docs for MasterAuthConfig
// fields; the "" key is the type-level description.
var map_MasterAuthConfig = map[string]string{
	"": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators",
	"requestHeader": "RequestHeader holds options for setting up a front proxy against the API. It is optional.",
	"webhookTokenAuthenticators": "WebhookTokenAuthnConfig, if present configures remote token reviewers",
	"oauthMetadataFile": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig",
}

// SwaggerDoc returns the generated swagger docs for MasterAuthConfig.
func (MasterAuthConfig) SwaggerDoc() map[string]string {
	return map_MasterAuthConfig
}
+
// map_MasterClients holds the generated swagger docs for MasterClients fields;
// the "" key is the type-level description.
var map_MasterClients = map[string]string{
	"": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes",
	"openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master",
	"openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.",
}

// SwaggerDoc returns the generated swagger docs for MasterClients.
func (MasterClients) SwaggerDoc() map[string]string {
	return map_MasterClients
}
+
// map_MasterConfig holds the generated swagger docs for MasterConfig fields;
// the "" key is the type-level description.
var map_MasterConfig = map[string]string{
	"": "MasterConfig holds the necessary configuration options for the OpenShift master\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"servingInfo": "ServingInfo describes how to start serving",
	"authConfig": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators",
	"aggregatorConfig": "AggregatorConfig has options for configuring the aggregator component of the API server.",
	"corsAllowedOrigins": "CORSAllowedOrigins",
	"apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples",
	"masterPublicURL": "MasterPublicURL is how clients can access the OpenShift API server",
	"controllers": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.",
	"admissionConfig": "AdmissionConfig contains admission control plugin configuration.",
	"controllerConfig": "ControllerConfig holds configuration values for controllers",
	"etcdStorageConfig": "EtcdStorageConfig contains information about how API resources are stored in Etcd. These values are only relevant when etcd is the backing store for the cluster.",
	"etcdClientInfo": "EtcdClientInfo contains information about how to connect to etcd",
	"kubeletClientInfo": "KubeletClientInfo contains information about how to connect to kubelets",
	"kubernetesMasterConfig": "KubernetesMasterConfig, if present start the kubernetes master in this process",
	"etcdConfig": "EtcdConfig, if present start etcd in this process",
	"oauthConfig": "OAuthConfig, if present start the /oauth endpoint in this process",
	"dnsConfig": "DNSConfig, if present start the DNS server in this process",
	"serviceAccountConfig": "ServiceAccountConfig holds options related to service accounts",
	"masterClients": "MasterClients holds all the client connection information for controllers and other system components",
	"imageConfig": "ImageConfig holds options that describe how to build image names for system components",
	"imagePolicyConfig": "ImagePolicyConfig controls limits and behavior for importing images",
	"policyConfig": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy",
	"projectConfig": "ProjectConfig holds information about project creation and defaults",
	"routingConfig": "RoutingConfig holds information about routing and route generation",
	"networkConfig": "NetworkConfig to be passed to the compiled in network plugin",
	"volumeConfig": "MasterVolumeConfig contains options for configuring volume plugins in the master node.",
	"jenkinsPipelineConfig": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.",
	"auditConfig": "AuditConfig holds information related to auditing capabilities.",
}

// SwaggerDoc returns the generated swagger docs for MasterConfig.
func (MasterConfig) SwaggerDoc() map[string]string {
	return map_MasterConfig
}
+
// map_MasterNetworkConfig holds the generated swagger docs for
// MasterNetworkConfig fields; the "" key is the type-level description.
// NOTE(review): typo fix below ("allocate addressed from" -> "allocate
// addresses from") must also be applied to the doc comment on the source
// type, otherwise regeneration will reintroduce it.
var map_MasterNetworkConfig = map[string]string{
	"": "MasterNetworkConfig to be passed to the compiled in network plugin",
	"networkPluginName": "NetworkPluginName is the name of the network plugin to use",
	"clusterNetworkCIDR": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.",
	"clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.",
	"hostSubnetLength": "HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.",
	"serviceNetworkCIDR": "ServiceNetwork is the CIDR string to specify the service networks",
	"externalIPNetworkCIDRs": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.",
	"ingressIPNetworkCIDR": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.",
	"vxlanPort": "VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value",
}
+
// SwaggerDoc returns the generated swagger docs for MasterNetworkConfig.
func (MasterNetworkConfig) SwaggerDoc() map[string]string {
	return map_MasterNetworkConfig
}
+
// map_MasterVolumeConfig holds the generated swagger docs for
// MasterVolumeConfig fields; the "" key is the type-level description.
var map_MasterVolumeConfig = map[string]string{
	"": "MasterVolumeConfig contains options for configuring volume plugins in the master node.",
	"dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true",
}

// SwaggerDoc returns the generated swagger docs for MasterVolumeConfig.
func (MasterVolumeConfig) SwaggerDoc() map[string]string {
	return map_MasterVolumeConfig
}
+
// map_NamedCertificate holds the generated swagger docs for NamedCertificate
// fields; the "" key is the type-level description.
var map_NamedCertificate = map[string]string{
	"": "NamedCertificate specifies a certificate/key, and the names it should be served for",
	"names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.",
}

// SwaggerDoc returns the generated swagger docs for NamedCertificate.
func (NamedCertificate) SwaggerDoc() map[string]string {
	return map_NamedCertificate
}
+
// map_NodeAuthConfig holds the generated swagger docs for NodeAuthConfig
// fields; the "" key is the type-level description.
var map_NodeAuthConfig = map[string]string{
	"": "NodeAuthConfig holds authn/authz configuration options",
	"authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled",
	"authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.",
	"authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled",
	"authorizationCacheSize": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.",
}

// SwaggerDoc returns the generated swagger docs for NodeAuthConfig.
func (NodeAuthConfig) SwaggerDoc() map[string]string {
	return map_NodeAuthConfig
}
+
// map_NodeConfig holds the generated swagger docs for NodeConfig fields; the
// "" key is the type-level description.
var map_NodeConfig = map[string]string{
	"": "NodeConfig is the fully specified config starting an OpenShift node\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"nodeName": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list",
	"nodeIP": "Node may have multiple IPs, specify the IP to use for pod traffic routing If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used",
	"servingInfo": "ServingInfo describes how to start serving",
	"masterKubeConfig": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master",
	"masterClientConnectionOverrides": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.",
	"dnsDomain": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.",
	"dnsIP": "DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.",
	"dnsBindAddress": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.",
	"dnsNameservers": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.",
	"dnsRecursiveResolvConf": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.",
	"networkPluginName": "Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead",
	"networkConfig": "NetworkConfig provides network options for the node",
	"volumeDirectory": "VolumeDirectory is the directory that volumes will be stored under",
	"imageConfig": "ImageConfig holds options that describe how to build image names for system components",
	"allowDisabledDocker": "AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.",
	"podManifestConfig": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node",
	"authConfig": "AuthConfig holds authn/authz configuration options",
	"dockerConfig": "DockerConfig holds Docker related configuration options.",
	"kubeletArguments": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.",
	"proxyArguments": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.",
	"iptablesSyncPeriod": "IPTablesSyncPeriod is how often iptable rules are refreshed",
	"enableUnidling": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up",
	"volumeConfig": "VolumeConfig contains options for configuring volumes on the node.",
}

// SwaggerDoc returns the generated swagger docs for NodeConfig.
func (NodeConfig) SwaggerDoc() map[string]string {
	return map_NodeConfig
}
+
// map_NodeNetworkConfig holds the generated swagger docs for NodeNetworkConfig
// fields; the "" key is the type-level description.
var map_NodeNetworkConfig = map[string]string{
	"": "NodeNetworkConfig provides network options for the node",
	"networkPluginName": "NetworkPluginName is a string specifying the networking plugin",
	"mtu": "Maximum transmission unit for the network packets",
}

// SwaggerDoc returns the generated swagger docs for NodeNetworkConfig.
func (NodeNetworkConfig) SwaggerDoc() map[string]string {
	return map_NodeNetworkConfig
}
+
// map_NodeVolumeConfig holds the generated swagger docs for NodeVolumeConfig
// fields; the "" key is the type-level description.
var map_NodeVolumeConfig = map[string]string{
	"": "NodeVolumeConfig contains options for configuring volumes on the node.",
	"localQuota": "LocalQuota contains options for controlling local volume quota on the node.",
}

// SwaggerDoc returns the generated swagger docs for NodeVolumeConfig.
func (NodeVolumeConfig) SwaggerDoc() map[string]string {
	return map_NodeVolumeConfig
}
+
// map_OAuthConfig holds the generated swagger docs for OAuthConfig fields; the
// "" key is the type-level description.
var map_OAuthConfig = map[string]string{
	"": "OAuthConfig holds the necessary configuration options for OAuth authentication",
	"masterCA": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.",
	"masterURL": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens",
	"masterPublicURL": "MasterPublicURL is used for building valid client redirect URLs for internal and external access",
	"assetPublicURL": "AssetPublicURL is used for building valid client redirect URLs for external access",
	"alwaysShowProviderSelection": "AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.",
	"identityProviders": "IdentityProviders is an ordered list of ways for a user to identify themselves",
	"grantConfig": "GrantConfig describes how to handle grants",
	"sessionConfig": "SessionConfig hold information about configuring sessions.",
	"tokenConfig": "TokenConfig contains options for authorization and access tokens",
	"templates": "Templates allow you to customize pages like the login page.",
}

// SwaggerDoc returns the generated swagger docs for OAuthConfig.
func (OAuthConfig) SwaggerDoc() map[string]string {
	return map_OAuthConfig
}
+
// map_OAuthTemplates holds the generated swagger docs for OAuthTemplates
// fields; the "" key is the type-level description.
var map_OAuthTemplates = map[string]string{
	"": "OAuthTemplates allow for customization of pages like the login page",
	"login": "Login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.",
	"providerSelection": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.",
	"error": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.",
}

// SwaggerDoc returns the generated swagger docs for OAuthTemplates.
func (OAuthTemplates) SwaggerDoc() map[string]string {
	return map_OAuthTemplates
}
+
// map_OpenIDClaims holds the generated swagger docs for OpenIDClaims fields;
// the "" key is the type-level description.
var map_OpenIDClaims = map[string]string{
	"": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
	"id": "ID is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"",
	"preferredUsername": "PreferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim",
	"name": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
	"email": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
}

// SwaggerDoc returns the generated swagger docs for OpenIDClaims.
func (OpenIDClaims) SwaggerDoc() map[string]string {
	return map_OpenIDClaims
}
+
// map_OpenIDIdentityProvider holds the generated swagger docs for
// OpenIDIdentityProvider fields; the "" key is the type-level description.
var map_OpenIDIdentityProvider = map[string]string{
	"": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
	"clientID": "ClientID is the oauth client ID",
	"clientSecret": "ClientSecret is the oauth client secret",
	"extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.",
	"extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.",
	"urls": "URLs to use to authenticate",
	"claims": "Claims mappings",
}

// SwaggerDoc returns the generated swagger docs for OpenIDIdentityProvider.
func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
	return map_OpenIDIdentityProvider
}
+
// map_OpenIDURLs holds the generated swagger docs for OpenIDURLs fields; the
// "" key is the type-level description.
var map_OpenIDURLs = map[string]string{
	"": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider",
	"authorize": "Authorize is the oauth authorization URL",
	"token": "Token is the oauth token granting URL",
	"userInfo": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims",
}

// SwaggerDoc returns the generated swagger docs for OpenIDURLs.
func (OpenIDURLs) SwaggerDoc() map[string]string {
	return map_OpenIDURLs
}
+
// map_PodManifestConfig holds the generated swagger docs for PodManifestConfig
// fields; the "" key is the type-level description.
// NOTE(review): grammar fixes below ("on or more" -> "one or more",
// "its" -> "it's") must also be applied to the doc comment on the source
// type, otherwise regeneration will reintroduce them.
var map_PodManifestConfig = map[string]string{
	"": "PodManifestConfig holds the necessary configuration options for using pod manifests",
	"path": "Path specifies the path for the pod manifest file or directory If it's a directory, it's expected to contain one or more manifest files This is used by the Kubelet to create pods on the node",
	"fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value",
}
+
// SwaggerDoc returns the generated swagger docs for PodManifestConfig.
func (PodManifestConfig) SwaggerDoc() map[string]string {
	return map_PodManifestConfig
}
+
// map_PolicyConfig holds the generated swagger docs for PolicyConfig fields;
// the "" key is the type-level description (truncated in the source type's
// comment, hence the fragment below).
var map_PolicyConfig = map[string]string{
	"": "holds the necessary configuration options for",
	"userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
}

// SwaggerDoc returns the generated swagger docs for PolicyConfig.
func (PolicyConfig) SwaggerDoc() map[string]string {
	return map_PolicyConfig
}
+
// map_ProjectConfig holds the generated swagger docs for ProjectConfig fields;
// the "" key is the type-level description (truncated in the source type's
// comment, hence the fragment below).
var map_ProjectConfig = map[string]string{
	"": "holds the necessary configuration options for",
	"defaultNodeSelector": "DefaultNodeSelector holds default project node label selector",
	"projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
	"projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.",
	"securityAllocator": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.",
}

// SwaggerDoc returns the generated swagger docs for ProjectConfig.
func (ProjectConfig) SwaggerDoc() map[string]string {
	return map_ProjectConfig
}
+
// map_RFC2307Config holds the generated swagger docs for RFC2307Config fields;
// the "" key is the type-level description.
// NOTE(review): grammar fix below ("an only and error will be logged" ->
// "only an error will be logged") must also be applied to the doc comment on
// the source type, otherwise regeneration will reintroduce it.
var map_RFC2307Config = map[string]string{
	"": "RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the RFC2307 schema",
	"groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.",
	"groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)",
	"groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group",
	"groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute",
	"usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.",
	"userUIDAttribute": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes",
	"userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider",
	"tolerateMemberNotFoundErrors": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.",
	"tolerateMemberOutOfScopeErrors": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.",
}
+
// SwaggerDoc returns the generated swagger docs for RFC2307Config.
func (RFC2307Config) SwaggerDoc() map[string]string {
	return map_RFC2307Config
}
+
// map_RegistryLocation holds the generated swagger docs for RegistryLocation
// fields; the "" key is the type-level description.
var map_RegistryLocation = map[string]string{
	"": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
	"domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
	"insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
}

// SwaggerDoc returns the generated swagger docs for RegistryLocation.
func (RegistryLocation) SwaggerDoc() map[string]string {
	return map_RegistryLocation
}
+
// map_RemoteConnectionInfo holds the generated swagger docs for
// RemoteConnectionInfo fields; the "" key is the type-level description.
var map_RemoteConnectionInfo = map[string]string{
	"": "RemoteConnectionInfo holds information necessary for establishing a remote connection",
	"url": "URL is the remote URL to connect to",
	"ca": "CA is the CA for verifying TLS connections",
}

// SwaggerDoc returns the generated swagger docs for RemoteConnectionInfo.
func (RemoteConnectionInfo) SwaggerDoc() map[string]string {
	return map_RemoteConnectionInfo
}
+
// map_RequestHeaderAuthenticationOptions holds the generated swagger docs for
// RequestHeaderAuthenticationOptions fields; the "" key is the type-level
// description.
var map_RequestHeaderAuthenticationOptions = map[string]string{
	"": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.",
	"clientCA": "ClientCA is a file with the trusted signer certs. It is required.",
	"clientCommonNames": "ClientCommonNames is a required list of common names to require a match from.",
	"usernameHeaders": "UsernameHeaders is the list of headers to check for user information. First hit wins.",
	"groupHeaders": "GroupNameHeader is the set of headers to check for group information. All are unioned.",
	"extraHeaderPrefixes": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.",
}

// SwaggerDoc returns the generated swagger docs for RequestHeaderAuthenticationOptions.
func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string {
	return map_RequestHeaderAuthenticationOptions
}
+
// map_RequestHeaderIdentityProvider holds the generated swagger docs for
// RequestHeaderIdentityProvider fields; the "" key is the type-level
// description.
var map_RequestHeaderIdentityProvider = map[string]string{
	"": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
	"loginURL": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}",
	"challengeURL": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}",
	"clientCA": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.",
	"clientCommonNames": "ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
	"headers": "Headers is the set of headers to check for identity information",
	"preferredUsernameHeaders": "PreferredUsernameHeaders is the set of headers to check for the preferred username",
	"nameHeaders": "NameHeaders is the set of headers to check for the display name",
	"emailHeaders": "EmailHeaders is the set of headers to check for the email address",
}

// SwaggerDoc returns the generated swagger docs for RequestHeaderIdentityProvider.
func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
	return map_RequestHeaderIdentityProvider
}
+
// map_RoutingConfig holds the generated swagger docs for RoutingConfig fields;
// the "" key is the type-level description.
var map_RoutingConfig = map[string]string{
	"": "RoutingConfig holds the necessary configuration options for routing to subdomains",
	"subdomain": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.",
}

// SwaggerDoc returns the generated swagger docs for RoutingConfig.
func (RoutingConfig) SwaggerDoc() map[string]string {
	return map_RoutingConfig
}
+
+var map_SecurityAllocator = map[string]string{
+ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.",
+ "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).",
+ "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511",
+ "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).",
+}
+
+func (SecurityAllocator) SwaggerDoc() map[string]string {
+ return map_SecurityAllocator
+}
+
+var map_ServiceAccountConfig = map[string]string{
+ "": "ServiceAccountConfig holds the necessary configuration options for a service account",
+ "managedNames": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.",
+ "limitSecretReferences": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them",
+ "privateKeyFile": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.",
+ "publicKeyFiles": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.",
+ "masterCA": "MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.",
+}
+
+func (ServiceAccountConfig) SwaggerDoc() map[string]string {
+ return map_ServiceAccountConfig
+}
+
+var map_ServiceServingCert = map[string]string{
+ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+ "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.",
+}
+
+func (ServiceServingCert) SwaggerDoc() map[string]string {
+ return map_ServiceServingCert
+}
+
+var map_ServingInfo = map[string]string{
+ "": "ServingInfo holds information about serving web pages",
+ "bindAddress": "BindAddress is the ip:port to serve on",
+ "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"",
+ "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates",
+ "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames",
+ "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants",
+ "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants",
+}
+
+func (ServingInfo) SwaggerDoc() map[string]string {
+ return map_ServingInfo
+}
+
+var map_SessionConfig = map[string]string{
+ "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession",
+ "sessionSecretsFile": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start",
+ "sessionMaxAgeSeconds": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession",
+ "sessionName": "SessionName is the cookie name used to store the session",
+}
+
+func (SessionConfig) SwaggerDoc() map[string]string {
+ return map_SessionConfig
+}
+
+var map_SessionSecret = map[string]string{
+ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions",
+ "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.",
+ "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-",
+}
+
+func (SessionSecret) SwaggerDoc() map[string]string {
+ return map_SessionSecret
+}
+
+var map_SessionSecrets = map[string]string{
+ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.",
+}
+
+func (SessionSecrets) SwaggerDoc() map[string]string {
+ return map_SessionSecrets
+}
+
+var map_SourceStrategyDefaultsConfig = map[string]string{
+ "": "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.",
+ "incremental": "incremental indicates if s2i build strategies should perform an incremental build or not",
+}
+
+func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string {
+ return map_SourceStrategyDefaultsConfig
+}
+
+var map_StringSource = map[string]string{
+ "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.",
+}
+
+func (StringSource) SwaggerDoc() map[string]string {
+ return map_StringSource
+}
+
+var map_StringSourceSpec = map[string]string{
+ "": "StringSourceSpec specifies a string value, or external location",
+ "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.",
+ "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.",
+ "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.",
+ "keyFile": "KeyFile references a file containing the key to use to decrypt the value.",
+}
+
+func (StringSourceSpec) SwaggerDoc() map[string]string {
+ return map_StringSourceSpec
+}
+
+var map_TokenConfig = map[string]string{
+ "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
+ "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens",
+ "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens",
+ "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)",
+}
+
+func (TokenConfig) SwaggerDoc() map[string]string {
+ return map_TokenConfig
+}
+
+var map_UserAgentDenyRule = map[string]string{
+ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client",
+ "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.",
+}
+
+func (UserAgentDenyRule) SwaggerDoc() map[string]string {
+ return map_UserAgentDenyRule
+}
+
+var map_UserAgentMatchRule = map[string]string{
+ "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb",
+ "regex": "UserAgentRegex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f",
+ "httpVerbs": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".",
+}
+
+func (UserAgentMatchRule) SwaggerDoc() map[string]string {
+ return map_UserAgentMatchRule
+}
+
+var map_UserAgentMatchingConfig = map[string]string{
+ "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
+ "requiredClients": "If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed",
+ "deniedClients": "If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes",
+ "defaultRejectionMessage": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.",
+}
+
+func (UserAgentMatchingConfig) SwaggerDoc() map[string]string {
+ return map_UserAgentMatchingConfig
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+ "": "WebhookTokenAuthenticators holds the necessary configuation options for external token authenticators",
+ "configFile": "ConfigFile is a path to a Kubeconfig file with the webhook configuration",
+ "cacheTTL": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled",
+}
+
+func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string {
+ return map_WebhookTokenAuthenticator
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/machine/.codegen.yaml b/vendor/github.com/openshift/api/machine/.codegen.yaml
new file mode 100644
index 0000000000..bc2d86d4c6
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/.codegen.yaml
@@ -0,0 +1,3 @@
+swaggerdocs:
+ commentPolicy: Warn
+
diff --git a/vendor/github.com/openshift/api/machine/OWNERS b/vendor/github.com/openshift/api/machine/OWNERS
new file mode 100644
index 0000000000..53e482c75b
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+ - JoelSpeed
+ - alexander-demichev
+ - mandre
diff --git a/vendor/github.com/openshift/api/machine/install.go b/vendor/github.com/openshift/api/machine/install.go
new file mode 100644
index 0000000000..68df57704d
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/install.go
@@ -0,0 +1,32 @@
+package machine
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ machinev1 "github.com/openshift/api/machine/v1"
+ machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
+ machinev1beta1 "github.com/openshift/api/machine/v1beta1"
+)
+
+const (
+ GroupName = "machine.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(
+ machinev1beta1.Install,
+ machinev1.Install,
+ machinev1alpha1.Install,
+ )
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/Makefile b/vendor/github.com/openshift/api/machine/v1/Makefile
new file mode 100644
index 0000000000..767014ac19
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="machine.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/machine/v1/common.go b/vendor/github.com/openshift/api/machine/v1/common.go
new file mode 100644
index 0000000000..941d22b1cc
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/common.go
@@ -0,0 +1,13 @@
+package v1
+
+// InstanceTenancy indicates if instance should run on shared or single-tenant hardware.
+type InstanceTenancy string
+
+const (
+ // DefaultTenancy instance runs on shared hardware
+ DefaultTenancy InstanceTenancy = "default"
+ // DedicatedTenancy instance runs on single-tenant hardware
+ DedicatedTenancy InstanceTenancy = "dedicated"
+ // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control.
+ HostTenancy InstanceTenancy = "host"
+)
diff --git a/vendor/github.com/openshift/api/machine/v1/doc.go b/vendor/github.com/openshift/api/machine/v1/doc.go
new file mode 100644
index 0000000000..7bd97c9507
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=machine.openshift.io
+package v1
diff --git a/vendor/github.com/openshift/api/machine/v1/register.go b/vendor/github.com/openshift/api/machine/v1/register.go
new file mode 100644
index 0000000000..b950169bfa
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/register.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "machine.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &ControlPlaneMachineSet{},
+ &ControlPlaneMachineSetList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go
new file mode 100644
index 0000000000..4b5c8d6efb
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go
@@ -0,0 +1,374 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AlibabaDiskPerformanceLevel enum attribute to describe a disk's performance level
+type AlibabaDiskPerformanceLevel string
+
+// AlibabaDiskCategory enum attribute to describe a disk's category
+type AlibabaDiskCategory string
+
+// AlibabaDiskEncryptionMode enum attribute to describe whether to enable or disable disk encryption
+type AlibabaDiskEncryptionMode string
+
+// AlibabaDiskPreservationPolicy enum attribute to describe whether to preserve or delete a disk upon instance removal
+type AlibabaDiskPreservationPolicy string
+
+// AlibabaResourceReferenceType enum attribute to identify the type of resource reference
+type AlibabaResourceReferenceType string
+
+const (
+ // DeleteWithInstance enum property to delete disk with instance deletion
+ DeleteWithInstance AlibabaDiskPreservationPolicy = "DeleteWithInstance"
+ // PreserveDisk enum property to determine disk preservation with instance deletion
+ PreserveDisk AlibabaDiskPreservationPolicy = "PreserveDisk"
+
+ // AlibabaDiskEncryptionEnabled enum property to enable disk encryption
+ AlibabaDiskEncryptionEnabled AlibabaDiskEncryptionMode = "encrypted"
+ // AlibabaDiskEncryptionDisabled enum property to disable disk encryption
+ AlibabaDiskEncryptionDisabled AlibabaDiskEncryptionMode = "disabled"
+
+ // AlibabaDiskPerformanceLevel0 enum property to set the level at PL0
+ PL0 AlibabaDiskPerformanceLevel = "PL0"
+ // AlibabaDiskPerformanceLevel1 enum property to set the level at PL1
+ PL1 AlibabaDiskPerformanceLevel = "PL1"
+ // AlibabaDiskPerformanceLevel2 enum property to set the level at PL2
+ PL2 AlibabaDiskPerformanceLevel = "PL2"
+ // AlibabaDiskPerformanceLevel3 enum property to set the level at PL3
+ PL3 AlibabaDiskPerformanceLevel = "PL3"
+
+ // AlibabaDiskCatagoryUltraDisk enum property to set the category of disk to ultra disk
+ AlibabaDiskCatagoryUltraDisk AlibabaDiskCategory = "cloud_efficiency"
+ // AlibabaDiskCatagorySSD enum property to set the category of disk to standard SSD
+ AlibabaDiskCatagorySSD AlibabaDiskCategory = "cloud_ssd"
+ // AlibabaDiskCatagoryESSD enum property to set the category of disk to ESSD
+ AlibabaDiskCatagoryESSD AlibabaDiskCategory = "cloud_essd"
+ // AlibabaDiskCatagoryBasic enum property to set the category of disk to basic
+ AlibabaDiskCatagoryBasic AlibabaDiskCategory = "cloud"
+
+ // AlibabaResourceReferenceTypeID enum property to identify an ID type resource reference
+ AlibabaResourceReferenceTypeID AlibabaResourceReferenceType = "ID"
+ // AlibabaResourceReferenceTypeName enum property to identify a Name type resource reference
+ AlibabaResourceReferenceTypeName AlibabaResourceReferenceType = "Name"
+ // AlibabaResourceReferenceTypeTags enum property to identify a tags type resource reference
+ AlibabaResourceReferenceTypeTags AlibabaResourceReferenceType = "Tags"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type AlibabaCloudMachineProviderConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // More detail about alibabacloud ECS
+ // https://www.alibabacloud.com/help/doc-detail/25499.htm?spm=a2c63.l28256.b99.727.496d7453jF7Moz
+
+ // The instance type of the instance.
+ InstanceType string `json:"instanceType"`
+
+ // The ID of the vpc
+ VpcID string `json:"vpcId"`
+
+ // The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list.
+ RegionID string `json:"regionId"`
+
+ // The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list.
+ ZoneID string `json:"zoneId"`
+
+ // The ID of the image used to create the instance.
+ ImageID string `json:"imageId"`
+
+ // DataDisks holds information regarding the extra disks attached to the instance
+ // +optional
+ DataDisks []DataDiskProperties `json:"dataDisk,omitempty"`
+
+ // SecurityGroups is a list of security group references to assign to the instance.
+ // A reference holds either the security group ID, the resource name, or the required tags to search.
+ // When more than one security group is returned for a tag search, all the groups are associated with the instance up to the
+ // maximum number of security groups to which an instance can belong.
+ // For more information, see the "Security group limits" section in Limits.
+ // https://www.alibabacloud.com/help/en/doc-detail/25412.htm
+ SecurityGroups []AlibabaResourceReference `json:"securityGroups,omitempty"`
+
+ // Bandwidth describes the internet bandwidth strategy for the instance
+ // +optional
+ Bandwidth BandwidthProperties `json:"bandwidth,omitempty"`
+
+ // SystemDisk holds the properties regarding the system disk for the instance
+ // +optional
+ SystemDisk SystemDiskProperties `json:"systemDisk,omitempty"`
+
+ // VSwitch is a reference to the vswitch to use for this instance.
+ // A reference holds either the vSwitch ID, the resource name, or the required tags to search.
+ // When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used.
+ // This parameter is required when you create an instance of the VPC type.
+ // You can call the DescribeVSwitches operation to query the created vSwitches.
+ VSwitch AlibabaResourceReference `json:"vSwitch"`
+
+ // RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.
+ // +optional
+ RAMRoleName string `json:"ramRoleName,omitempty"`
+
+ // ResourceGroup references the resource group to which to assign the instance.
+ // A reference holds either the resource group ID, the resource name, or the required tags to search.
+ // When more than one resource group are returned for a search, an error will be produced and the Machine will not be created.
+ // Resource Groups do not support searching by tags.
+ ResourceGroup AlibabaResourceReference `json:"resourceGroup"`
+
+ // Tenancy specifies whether to create the instance on a dedicated host.
+ // Valid values:
+ //
+ // default: creates the instance on a non-dedicated host.
+ // host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `default`.
+ // +optional
+ Tenancy InstanceTenancy `json:"tenancy,omitempty"`
+
+ // UserDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance
+ // +optional
+ UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+
+ // CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions
+ // provided by attached RAM role where the actuator is running.
+ // +optional
+ CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+
+ // Tags are the set of metadata to add to an instance.
+ // +optional
+ Tags []Tag `json:"tag,omitempty"`
+}
+
+// AlibabaResourceReference is a reference to a specific AlibabaCloud resource by ID, name, or tags.
+// Only one of ID, Name, or Tags may be specified. Specifying more than one will result in
+// a validation error.
+type AlibabaResourceReference struct {
+ // type identifies the resource reference type for this entry.
+ Type AlibabaResourceReferenceType `json:"type"`
+
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+
+ // Name of the resource
+ // +optional
+ Name *string `json:"name,omitempty"`
+
+ // Tags is a set of metadata based upon ECS object tags used to identify a resource.
+ // For details about usage when multiple resources are found, please see the owning parent field documentation.
+ // +optional
+ Tags *[]Tag `json:"tags,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AlibabaCloudMachineProviderConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []AlibabaCloudMachineProviderConfig `json:"items"`
+}
+
+// AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AlibabaCloudMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // InstanceID is the instance ID of the machine created in alibabacloud
+ // +optional
+ InstanceID *string `json:"instanceId,omitempty"`
+
+ // InstanceState is the state of the alibabacloud instance for this machine
+ // +optional
+ InstanceState *string `json:"instanceState,omitempty"`
+
+ // Conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category
+type SystemDiskProperties struct {
+ // Category is the category of the system disk.
+ // Valid values:
+ // cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk.
+ // cloud_efficiency: ultra disk.
+ // cloud_ssd: standard SSD.
+ // cloud: basic disk.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+ // Currently for other instances, the default is `cloud_efficiency`.
+ // +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+ // +optional
+ Category string `json:"category,omitempty"`
+
+ // PerformanceLevel is the performance level of the ESSD used as the system disk.
+ // Valid values:
+ //
+ // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+ // PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+ // PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+ // PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `PL1`.
+ // For more information about ESSD performance levels, see ESSDs.
+ // +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+ // +optional
+ PerformanceLevel string `json:"performanceLevel,omitempty"`
+
+ // Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""`.
+ // +kubebuilder:validation:MaxLength=128
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500.
+ // The value must be at least 20 and greater than or equal to the size of the image.
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `40` or the size of the image depending on whichever is greater.
+ // +optional
+ Size int64 `json:"size,omitempty"`
+}
+
+// DataDiskProperties contains the information regarding the data disk attached to an instance
+type DataDiskProperties struct {
+ // Name is the name of data disk N. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+ //
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""`.
+ // +optional
+ Name string `name:"diskName,omitempty"`
+
+ // SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.
+ //
+ // When the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot.
+ // Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.
+ //
+ // +optional
+ SnapshotID string `name:"snapshotId,omitempty"`
+
+ // Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:
+ //
+ // Valid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud: 5 to 2000
+ // The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.
+ // +optional
+ Size int64 `name:"size,omitempty"`
+
+ // DiskEncryption specifies whether to encrypt data disk N.
+ //
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `disabled`.
+ // +kubebuilder:validation:Enum="encrypted";"disabled"
+ // +optional
+ DiskEncryption AlibabaDiskEncryptionMode `name:"diskEncryption,omitempty"`
+
+ // PerformanceLevel is the performance level of the ESSD used as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `PL1`.
+ // Valid values:
+ //
+ // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+ // PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+ // PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+ // PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+ // For more information about ESSD performance levels, see ESSDs.
+ // +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+ // +optional
+ PerformanceLevel AlibabaDiskPerformanceLevel `name:"performanceLevel,omitempty"`
+
+ // Category describes the type of data disk N.
+ // Valid values:
+ // cloud_efficiency: ultra disk
+ // cloud_ssd: standard SSD
+ // cloud_essd: ESSD
+ // cloud: basic disk
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+ // Currently for other instances, the default is `cloud_efficiency`.
+ // +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+ // +optional
+ Category AlibabaDiskCategory `name:"category,omitempty"`
+
+ // KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""` which is interpreted as do not use KMSKey encryption.
+ // +optional
+ KMSKeyID string `name:"kmsKeyId,omitempty"`
+
+ // DiskPreservation specifies whether to release data disk N along with the instance.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `DeleteWithInstance`
+ // +kubebuilder:validation:Enum="DeleteWithInstance";"PreserveDisk"
+ // +optional
+ DiskPreservation AlibabaDiskPreservationPolicy `name:"diskPreservation,omitempty"`
+}
+
+// Tag The tags of ECS Instance
+type Tag struct {
+ // Key is the name of the key pair
+ Key string `name:"Key"`
+ // Value is the value or data of the key pair
+ Value string `name:"value"`
+}
+
+// Bandwidth describes the bandwidth strategy for the network of the instance
+type BandwidthProperties struct {
+ // InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values:
+ // When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10.
+ // Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s.
+ // When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value.
+ // Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.
+ // +optional
+ InternetMaxBandwidthIn int64 `json:"internetMaxBandwidthIn,omitempty"`
+
+ // InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100.
+ // When a value greater than 0 is used then a public IP address is assigned to the instance.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `0`
+ // +optional
+ InternetMaxBandwidthOut int64 `json:"internetMaxBandwidthOut,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_aws.go b/vendor/github.com/openshift/api/machine/v1/types_aws.go
new file mode 100644
index 0000000000..a41237c3bf
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_aws.go
@@ -0,0 +1,49 @@
+package v1
+
+// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
+// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+// a validation error.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ID' ? has(self.id) : !has(self.id)",message="id is required when type is ID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ARN' ? has(self.arn) : !has(self.arn)",message="arn is required when type is ARN, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Filters' ? has(self.filters) : !has(self.filters)",message="filters is required when type is Filters, and forbidden otherwise"
+type AWSResourceReference struct {
+ // Type determines how the reference will fetch the AWS resource.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Type AWSResourceReferenceType `json:"type"`
+ // ID of resource.
+ // +optional
+ ID *string `json:"id,omitempty"`
+ // ARN of resource.
+ // +optional
+ ARN *string `json:"arn,omitempty"`
+ // Filters is a set of filters used to identify a resource.
+ // +optional
+ Filters *[]AWSResourceFilter `json:"filters,omitempty"`
+}
+
+// AWSResourceReferenceType is an enumeration of different resource reference types.
+// +kubebuilder:validation:Enum:="ID";"ARN";"Filters"
+type AWSResourceReferenceType string
+
+const (
+ // AWSIDReferenceType is a resource reference based on the object ID.
+ AWSIDReferenceType AWSResourceReferenceType = "ID"
+
+ // AWSARNReferenceType is a resource reference based on the object ARN.
+ AWSARNReferenceType AWSResourceReferenceType = "ARN"
+
+ // AWSFiltersReferenceType is a resource reference based on filters.
+ AWSFiltersReferenceType AWSResourceReferenceType = "Filters"
+)
+
+// AWSResourceFilter is a filter used to identify an AWS resource
+type AWSResourceFilter struct {
+ // Name of the filter. Filter names are case-sensitive.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+ // Values includes one or more filter values. Filter values are case-sensitive.
+ // +optional
+ Values []string `json:"values,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go
new file mode 100644
index 0000000000..a2e7ae03e1
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go
@@ -0,0 +1,468 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ machinev1beta1 "github.com/openshift/api/machine/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=controlplanemachinesets,scope=Namespaced
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas"
+// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas"
+// +kubebuilder:printcolumn:name="Updated",type="integer",JSONPath=".status.updatedReplicas",description="Updated Replicas"
+// +kubebuilder:printcolumn:name="Unavailable",type="integer",JSONPath=".status.unavailableReplicas",description="Observed number of unavailable replicas"
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".spec.state",description="ControlPlaneMachineSet state"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="ControlPlaneMachineSet age"
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1112
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=control-plane-machine-set,operatorOrdering=01
+// +openshift:capability=MachineAPI
+// +kubebuilder:metadata:annotations="exclude.release.openshift.io/internal-openshift-hosted=true"
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+
+// ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time.
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ControlPlaneMachineSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ControlPlaneMachineSetSpec `json:"spec,omitempty"`
+ Status ControlPlaneMachineSetStatus `json:"status,omitempty"`
+}
+
+// ControlPlaneMachineSetSpec represents the configuration of the ControlPlaneMachineSet.
+type ControlPlaneMachineSetSpec struct {
+ // State defines whether the ControlPlaneMachineSet is Active or Inactive.
+ // When Inactive, the ControlPlaneMachineSet will not take any action on the
+ // state of the Machines within the cluster.
+ // When Active, the ControlPlaneMachineSet will reconcile the Machines and
+ // will update the Machines as necessary.
+ // Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent
+ // further action please remove the ControlPlaneMachineSet.
+ // +kubebuilder:default:="Inactive"
+ // +default="Inactive"
+ // +kubebuilder:validation:XValidation:rule="oldSelf != 'Active' || self == oldSelf",message="state cannot be changed once Active"
+ // +optional
+ State ControlPlaneMachineSetState `json:"state,omitempty"`
+
+ // Replicas defines how many Control Plane Machines should be
+ // created by this ControlPlaneMachineSet.
+ // This field is immutable and cannot be changed after cluster
+ // installation.
+ // The ControlPlaneMachineSet only operates with 3 or 5 node control planes,
+ // 3 and 5 are the only valid values for this field.
+ // +kubebuilder:validation:Enum:=3;5
+ // +kubebuilder:default:=3
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="replicas is immutable"
+ // +kubebuilder:validation:Required
+ Replicas *int32 `json:"replicas"`
+
+ // Strategy defines how the ControlPlaneMachineSet will update
+ // Machines when it detects a change to the ProviderSpec.
+ // +kubebuilder:default:={type: RollingUpdate}
+ // +optional
+ Strategy ControlPlaneMachineSetStrategy `json:"strategy,omitempty"`
+
+ // Label selector for Machines. Existing Machines selected by this
+ // selector will be the ones affected by this ControlPlaneMachineSet.
+ // It must match the template's labels.
+ // This field is considered immutable after creation of the resource.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable"
+ // +kubebuilder:validation:Required
+ Selector metav1.LabelSelector `json:"selector"`
+
+ // Template describes the Control Plane Machines that will be created
+ // by this ControlPlaneMachineSet.
+ // +kubebuilder:validation:Required
+ Template ControlPlaneMachineSetTemplate `json:"template"`
+}
+
+// ControlPlaneMachineSetState is an enumeration of the possible states of the
+// ControlPlaneMachineSet resource. It allows it to be either Active or Inactive.
+// +kubebuilder:validation:Enum:="Active";"Inactive"
+type ControlPlaneMachineSetState string
+
+const (
+ // ControlPlaneMachineSetStateActive is the value used to denote the ControlPlaneMachineSet
+ // should be active and should perform updates as required.
+ ControlPlaneMachineSetStateActive ControlPlaneMachineSetState = "Active"
+
+ // ControlPlaneMachineSetStateInactive is the value used to denote the ControlPlaneMachineSet
+ // should not be active and should not perform any updates.
+ ControlPlaneMachineSetStateInactive ControlPlaneMachineSetState = "Inactive"
+)
+
+// ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet
+// to create the Machines that it will manage in the future.
+// +union
+// + ---
+// + This struct is a discriminated union which allows users to select the type of Machine
+// + that the ControlPlaneMachineSet should create and manage.
+// + For now, the only supported type is the OpenShift Machine API Machine, but in the future
+// + we plan to expand this to allow other Machine types such as Cluster API Machines or a
+// + future version of the Machine API Machine.
+// +kubebuilder:validation:XValidation:rule="has(self.machineType) && self.machineType == 'machines_v1beta1_machine_openshift_io' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)",message="machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise"
+type ControlPlaneMachineSetTemplate struct {
+ // MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet.
+ // Currently, the only valid value is machines_v1beta1_machine_openshift_io.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"`
+
+ // OpenShiftMachineV1Beta1Machine defines the template for creating Machines
+ // from the v1beta1.machine.openshift.io API group.
+ // +optional
+ OpenShiftMachineV1Beta1Machine *OpenShiftMachineV1Beta1MachineTemplate `json:"machines_v1beta1_machine_openshift_io,omitempty"`
+}
+
+// ControlPlaneMachineSetMachineType is an enumeration of valid Machine types
+// supported by the ControlPlaneMachineSet.
+// +kubebuilder:validation:Enum:=machines_v1beta1_machine_openshift_io
+type ControlPlaneMachineSetMachineType string
+
+const (
+ // OpenShiftMachineV1Beta1MachineType is the OpenShift Machine API v1beta1 Machine type.
+ OpenShiftMachineV1Beta1MachineType ControlPlaneMachineSetMachineType = "machines_v1beta1_machine_openshift_io"
+)
+
+// OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create
+// Machines from the v1beta1.machine.openshift.io API group.
+type OpenShiftMachineV1Beta1MachineTemplate struct {
+ // FailureDomains is the list of failure domains (sometimes called
+ // availability zones) in which the ControlPlaneMachineSet should balance
+ // the Control Plane Machines.
+ // This will be merged into the ProviderSpec given in the template.
+ // This field is optional on platforms that do not require placement information.
+ // +optional
+ FailureDomains *FailureDomains `json:"failureDomains,omitempty"`
+
+ // ObjectMeta is the standard object metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // Labels are required to match the ControlPlaneMachineSet selector.
+ // +kubebuilder:validation:Required
+ ObjectMeta ControlPlaneMachineSetTemplateObjectMeta `json:"metadata"`
+
+ // Spec contains the desired configuration of the Control Plane Machines.
+ // The ProviderSpec within contains platform specific details
+ // for creating the Control Plane Machines.
+ // The ProviderSpec should be complete apart from the platform specific
+ // failure domain field. This will be overridden when the Machines
+ // are created based on the FailureDomains field.
+ // +kubebuilder:validation:Required
+ Spec machinev1beta1.MachineSpec `json:"spec"`
+}
+
+// ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct.
+// It allows users to specify labels and annotations that will be copied onto Machines
+// created from this template.
+type ControlPlaneMachineSetTemplateObjectMeta struct {
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: http://kubernetes.io/docs/user-guide/labels.
+ // This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'.
+ // It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'.
+ // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-role' in self && self['machine.openshift.io/cluster-api-machine-role'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'"
+ // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-type' in self && self['machine.openshift.io/cluster-api-machine-type'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'"
+ // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-cluster' in self",message="label 'machine.openshift.io/cluster-api-cluster' is required"
+ // +kubebuilder:validation:Required
+ Labels map[string]string `json:"labels"`
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://kubernetes.io/docs/user-guide/annotations
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// ControlPlaneMachineSetStrategy defines the strategy for applying updates to the
+// Control Plane Machines managed by the ControlPlaneMachineSet.
+type ControlPlaneMachineSetStrategy struct {
+ // Type defines the type of update strategy that should be
+ // used when updating Machines owned by the ControlPlaneMachineSet.
+ // Valid values are "RollingUpdate" and "OnDelete".
+ // The current default value is "RollingUpdate".
+ // +kubebuilder:default:="RollingUpdate"
+ // +default="RollingUpdate"
+ // +kubebuilder:validation:Enum:="RollingUpdate";"OnDelete"
+ // +optional
+ Type ControlPlaneMachineSetStrategyType `json:"type,omitempty"`
+
+ // This is left as a struct to allow future rolling update
+ // strategy configuration to be added later.
+}
+
+// ControlPlaneMachineSetStrategyType is an enumeration of different update strategies
+// for the Control Plane Machines.
+type ControlPlaneMachineSetStrategyType string
+
+const (
+ // RollingUpdate is the default update strategy type for a
+ // ControlPlaneMachineSet. This will cause the ControlPlaneMachineSet to
+ // first create a new Machine and wait for this to be Ready
+ // before removing the Machine chosen for replacement.
+ RollingUpdate ControlPlaneMachineSetStrategyType = "RollingUpdate"
+
+ // Recreate causes the ControlPlaneMachineSet controller to first
+ // remove a ControlPlaneMachine before creating its
+ // replacement. This allows for scenarios with limited capacity
+ // such as baremetal environments where additional capacity to
+ // perform rolling updates is not available.
+ Recreate ControlPlaneMachineSetStrategyType = "Recreate"
+
+ // OnDelete causes the ControlPlaneMachineSet to only replace a
+ // Machine once it has been marked for deletion. This strategy
+ // makes the rollout of updated specifications into a manual
+ // process. This allows users to test new configuration on
+ // a single Machine without forcing the rollout of all of their
+ // Control Plane Machines.
+ OnDelete ControlPlaneMachineSetStrategyType = "OnDelete"
+)
+
+// FailureDomain represents the different configurations required to spread Machines
+// across failure domains on different platforms.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Azure' ? has(self.azure) : !has(self.azure)",message="azure configuration is required when platform is Azure, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'GCP' ? has(self.gcp) : !has(self.gcp)",message="gcp configuration is required when platform is GCP, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'OpenStack' ? has(self.openstack) : !has(self.openstack)",message="openstack configuration is required when platform is OpenStack, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'VSphere' ? has(self.vsphere) : !has(self.vsphere)",message="vsphere configuration is required when platform is VSphere, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Nutanix' ? has(self.nutanix) : !has(self.nutanix)",message="nutanix configuration is required when platform is Nutanix, and forbidden otherwise"
+type FailureDomains struct {
+ // Platform identifies the platform for which the FailureDomain represents.
+ // Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Platform configv1.PlatformType `json:"platform"`
+
+ // AWS configures failure domain information for the AWS platform.
+ // +optional
+ AWS *[]AWSFailureDomain `json:"aws,omitempty"`
+
+ // Azure configures failure domain information for the Azure platform.
+ // +optional
+ Azure *[]AzureFailureDomain `json:"azure,omitempty"`
+
+ // GCP configures failure domain information for the GCP platform.
+ // +optional
+ GCP *[]GCPFailureDomain `json:"gcp,omitempty"`
+
+ // vsphere configures failure domain information for the VSphere platform.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ VSphere []VSphereFailureDomain `json:"vsphere,omitempty"`
+
+ // OpenStack configures failure domain information for the OpenStack platform.
+ // +optional
+ //
+ // + ---
+ // + Unlike other platforms, OpenStack failure domains can be empty.
+ // + Some OpenStack deployments may not have availability zones or root volumes.
+ // + Therefore we'll check the length of the list to determine if it's empty,
+ // + instead of checking for nil as we would if it were a pointer.
+ // +optional
+ OpenStack []OpenStackFailureDomain `json:"openstack,omitempty"`
+
+ // nutanix configures failure domain information for the Nutanix platform.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ Nutanix []NutanixFailureDomainReference `json:"nutanix,omitempty"`
+}
+
+// AWSFailureDomain configures failure domain information for the AWS platform.
+// +kubebuilder:validation:MinProperties:=1
+type AWSFailureDomain struct {
+ // Subnet is a reference to the subnet to use for this instance.
+ // +optional
+ Subnet *AWSResourceReference `json:"subnet,omitempty"`
+
+ // Placement configures the placement information for this instance.
+ // +optional
+ Placement AWSFailureDomainPlacement `json:"placement,omitempty"`
+}
+
+// AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.
+type AWSFailureDomainPlacement struct {
+ // AvailabilityZone is the availability zone of the instance.
+ // +kubebuilder:validation:Required
+ AvailabilityZone string `json:"availabilityZone"`
+}
+
+// AzureFailureDomain configures failure domain information for the Azure platform.
+type AzureFailureDomain struct {
+ // Availability Zone for the virtual machine.
+ // If nil, the virtual machine should be deployed to no zone.
+ // +kubebuilder:validation:Required
+ Zone string `json:"zone"`
+
+ // subnet is the name of the network subnet in which the VM will be created.
+ // When omitted, the subnet value from the machine providerSpec template will be used.
+ // +kubebuilder:validation:MaxLength=80
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9](?:[a-zA-Z0-9._-]*[a-zA-Z0-9_])?$`
+ // +optional
+ Subnet string `json:"subnet,omitempty"`
+}
+
+// GCPFailureDomain configures failure domain information for the GCP platform
+type GCPFailureDomain struct {
+ // Zone is the zone in which the GCP machine provider will create the VM.
+ // +kubebuilder:validation:Required
+ Zone string `json:"zone"`
+}
+
+// VSphereFailureDomain configures failure domain information for the vSphere platform
+type VSphereFailureDomain struct {
+ // name of the failure domain in which the vSphere machine provider will create the VM.
+ // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource.
+ // When balancing machines across failure domains, the control plane machine set will inject configuration from the
+ // Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+// OpenStackFailureDomain configures failure domain information for the OpenStack platform.
+// +kubebuilder:validation:MinProperties:=1
+// +kubebuilder:validation:XValidation:rule="!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)",message="rootVolume.availabilityZone is required when availabilityZone is set"
+type OpenStackFailureDomain struct {
+ // availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM.
+ // If not specified, the VM will be created in the default availability zone specified in the nova configuration.
+ // Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances
+ // are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs
+ // to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information.
+ // The maximum length of availability zone name is 63 as per labels limits.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Pattern=`^[^: ]*$`
+ // +kubebuilder:validation:MaxLength=63
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM.
+ // If not specified, no root volume will be created.
+ //
+ // + ---
+ // + RootVolume must be a pointer to allow us to require at least one valid property is set within the failure domain.
+ // + If it were a reference then omitempty doesn't work and the minProperties validations are no longer valid.
+ // +optional
+ RootVolume *RootVolume `json:"rootVolume,omitempty"`
+}
+
+// NutanixFailureDomainReference refers to the failure domain of the Nutanix platform.
+type NutanixFailureDomainReference struct {
+ // name of the failure domain in which the nutanix machine provider will create the VM.
+ // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?`
+ Name string `json:"name"`
+}
+
+// RootVolume represents the volume metadata to boot from.
+// The original RootVolume struct is defined in the v1alpha1 but it's not best practice to use it directly here so we define a new one
+// that should stay in sync with the original one.
+type RootVolume struct {
+ // availabilityZone specifies the Cinder availability zone where the root volume will be created.
+ // If not specified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration.
+ // If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability
+ // zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details.
+ // If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same
+ // availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone).
+ // Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure,
+ // see kubernetes/cloud-provider-openstack#1379 for further information.
+ // The maximum length of availability zone name is 63 as per labels limits.
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[^ ]*$`
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // volumeType specifies the type of the root volume that will be provisioned.
+ // The maximum length of a volume type name is 255 characters, as per the OpenStack limit.
+ // + ---
+ // + Historically, the installer has always required a volume type to be specified when deploying
+ // + the control plane with a root volume. This is because the default volume type in Cinder is not guaranteed
+ // + to be available, therefore we prefer the user to be explicit about the volume type to use.
+ // + We apply the same logic in CPMS: if the failure domain specifies a root volume, we require the user to specify a volume type.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=255
+ VolumeType string `json:"volumeType"`
+}
+
+// ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.
+type ControlPlaneMachineSetStatus struct {
+ // Conditions represents the observations of the ControlPlaneMachineSet's current state.
+ // Known .status.conditions.type are: Available, Degraded and Progressing.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // ObservedGeneration is the most recent generation observed for this
+ // ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation,
+ // which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Replicas is the number of Control Plane Machines created by the
+ // ControlPlaneMachineSet controller.
+ // Note that during update operations this value may differ from the
+ // desired replica count.
+ // +optional
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // ReadyReplicas is the number of Control Plane Machines created by the
+ // ControlPlaneMachineSet controller which are ready.
+ // Note that this value may be higher than the desired number of replicas
+ // while rolling updates are in-progress.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+
+ // UpdatedReplicas is the number of non-terminated Control Plane Machines
+ // created by the ControlPlaneMachineSet controller that have the desired
+ // provider spec and are ready.
+ // This value is set to 0 when a change is detected to the desired spec.
+ // When the update strategy is RollingUpdate, this will also coincide
+ // with starting the process of updating the Machines.
+ // When the update strategy is OnDelete, this value will remain at 0 until
+ // a user deletes an existing replica and its replacement has become ready.
+ // +optional
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty"`
+
+ // UnavailableReplicas is the number of Control Plane Machines that are
+ // still required before the ControlPlaneMachineSet reaches the desired
+ // available capacity. When this value is non-zero, the number of
+ // ReadyReplicas is less than the desired Replicas.
+ // +optional
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControlPlaneMachineSetList contains a list of ControlPlaneMachineSet
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ControlPlaneMachineSetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []ControlPlaneMachineSet `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
new file mode 100644
index 0000000000..1370ebdd28
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
@@ -0,0 +1,177 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type NutanixMachineProviderConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // cluster is to identify the cluster (the Prism Element under management
+ // of the Prism Central), in which the Machine's VM will be created.
+ // The cluster identifier (uuid or name) can be obtained from the Prism Central console
+ // or using the prism_central API.
+ // +kubebuilder:validation:Required
+ Cluster NutanixResourceIdentifier `json:"cluster"`
+
+ // image is to identify the rhcos image uploaded to the Prism Central (PC)
+ // The image identifier (uuid or name) can be obtained from the Prism Central console
+ // or using the prism_central API.
+ // +kubebuilder:validation:Required
+ Image NutanixResourceIdentifier `json:"image"`
+
+ // subnets holds a list of identifiers (one or more) of the cluster's network subnets
+ // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
+ // obtained from the Prism Central console or using the prism_central API.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ Subnets []NutanixResourceIdentifier `json:"subnets"`
+
+ // vcpusPerSocket is the number of vCPUs per socket of the VM
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ VCPUsPerSocket int32 `json:"vcpusPerSocket"`
+
+ // vcpuSockets is the number of vCPU sockets of the VM
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ VCPUSockets int32 `json:"vcpuSockets"`
+
+ // memorySize is the memory size (in Quantity format) of the VM
+ // The minimum memorySize is 2Gi bytes
+ // +kubebuilder:validation:Required
+ MemorySize resource.Quantity `json:"memorySize"`
+
+ // systemDiskSize is size (in Quantity format) of the system disk of the VM
+ // The minimum systemDiskSize is 20Gi bytes
+ // +kubebuilder:validation:Required
+ SystemDiskSize resource.Quantity `json:"systemDiskSize"`
+
+ // bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot.
+ // If this field is empty or omitted, the VM will use the default boot type "Legacy" to boot.
+ // "SecureBoot" depends on "UEFI" boot, i.e., enabling "SecureBoot" means that "UEFI" boot is also enabled.
+ // +kubebuilder:validation:Enum="";Legacy;UEFI;SecureBoot
+ // +optional
+ BootType NutanixBootType `json:"bootType"`
+
+ // project optionally identifies a Prism project for the Machine's VM to associate with.
+ // +optional
+ Project NutanixResourceIdentifier `json:"project"`
+
+ // categories optionally adds one or more prism categories (each with key and value) for
+ // the Machine's VM to associate with. All the category key and value pairs specified must
+ // already exist in the prism central.
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ Categories []NutanixCategory `json:"categories"`
+
+ // userDataSecret is a local reference to a secret that contains the
+ // UserData to apply to the VM
+ UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+
+ // credentialsSecret is a local reference to a secret that contains the
+ // credentials data to access Nutanix PC client
+ // +kubebuilder:validation:Required
+ CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"`
+
+ // failureDomain refers to the name of the FailureDomain with which this Machine is associated.
+ // If this is configured, the Nutanix machine controller will use the prism_central endpoint
+ // and credentials defined in the referenced FailureDomain to communicate to the prism_central.
+ // It will also verify that the 'cluster' and subnets' configuration in the NutanixMachineProviderConfig
+ // is consistent with that in the referenced failureDomain.
+ // +optional
+ FailureDomain *NutanixFailureDomainReference `json:"failureDomain"`
+}
+
+// NutanixCategory identifies a pair of prism category key and value
+type NutanixCategory struct {
+ // key is the prism category key name
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:Required
+ Key string `json:"key"`
+
+ // value is the prism category value associated with the key
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=64
+ // +kubebuilder:validation:Required
+ Value string `json:"value"`
+}
+
+// NutanixBootType is an enumeration of different boot types for Nutanix VM.
+type NutanixBootType string
+
+const (
+ // NutanixLegacyBoot is the legacy BIOS boot type
+ NutanixLegacyBoot NutanixBootType = "Legacy"
+
+ // NutanixUEFIBoot is the UEFI boot type
+ NutanixUEFIBoot NutanixBootType = "UEFI"
+
+ // NutanixSecureBoot is the Secure boot type
+ NutanixSecureBoot NutanixBootType = "SecureBoot"
+)
+
+// NutanixIdentifierType is an enumeration of different resource identifier types.
+type NutanixIdentifierType string
+
+const (
+ // NutanixIdentifierUUID is a resource identifier identifying the object by UUID.
+ NutanixIdentifierUUID NutanixIdentifierType = "uuid"
+
+ // NutanixIdentifierName is a resource identifier identifying the object by Name.
+ NutanixIdentifierName NutanixIdentifierType = "name"
+)
+
+// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)
+// +union
+type NutanixResourceIdentifier struct {
+ // Type is the identifier type to use for this resource.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum:=uuid;name
+ Type NutanixIdentifierType `json:"type"`
+
+ // uuid is the UUID of the resource in the PC.
+ // +optional
+ UUID *string `json:"uuid,omitempty"`
+
+ // name is the resource name in the PC
+ // +optional
+ Name *string `json:"name,omitempty"`
+}
+
+// NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains nutanix-specific status information.
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type NutanixMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // vmUUID is the Machine associated VM's UUID
+ // The field is missing before the VM is created.
+ // Once the VM is created, the field is filled with the VM's UUID and it will not change.
+ // The vmUUID is used to find the VM when updating the Machine status,
+ // and to delete the VM when the Machine is deleted.
+ // +optional
+ VmUUID *string `json:"vmUUID,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go
new file mode 100644
index 0000000000..c131139c54
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go
@@ -0,0 +1,227 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// PowerVSResourceType enum attribute to identify the type of resource reference
+type PowerVSResourceType string
+
+// PowerVSProcessorType enum attribute to identify the PowerVS instance processor type
+type PowerVSProcessorType string
+
+// IBMVPCLoadBalancerType is the type of LoadBalancer to use when registering
+// an instance with load balancers specified in LoadBalancerNames
+type IBMVPCLoadBalancerType string
+
+// ApplicationLoadBalancerType is possible values for IBMVPCLoadBalancerType.
+const (
+ ApplicationLoadBalancerType IBMVPCLoadBalancerType = "Application" // Application Load Balancer for VPC (ALB)
+)
+
+const (
+ // PowerVSResourceTypeID enum property to identify an ID type resource reference
+ PowerVSResourceTypeID PowerVSResourceType = "ID"
+ // PowerVSResourceTypeName enum property to identify a Name type resource reference
+ PowerVSResourceTypeName PowerVSResourceType = "Name"
+ // PowerVSResourceTypeRegEx enum property to identify a tags type resource reference
+ PowerVSResourceTypeRegEx PowerVSResourceType = "RegEx"
+ // PowerVSProcessorTypeDedicated enum property to identify a Dedicated Power VS processor type
+ PowerVSProcessorTypeDedicated PowerVSProcessorType = "Dedicated"
+ // PowerVSProcessorTypeShared enum property to identify a Shared Power VS processor type
+ PowerVSProcessorTypeShared PowerVSProcessorType = "Shared"
+ // PowerVSProcessorTypeCapped enum property to identify a Capped Power VS processor type
+ PowerVSProcessorTypeCapped PowerVSProcessorType = "Capped"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for a PowerVS virtual machine. It is used by the PowerVS machine actuator to create a single Machine.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type PowerVSMachineProviderConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // userDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance.
+ // +optional
+ UserDataSecret *PowerVSSecretReference `json:"userDataSecret,omitempty"`
+
+ // credentialsSecret is a reference to the secret with IBM Cloud credentials.
+ // +optional
+ CredentialsSecret *PowerVSSecretReference `json:"credentialsSecret,omitempty"`
+
+ // serviceInstance is the reference to the Power VS service on which the server instance(VM) will be created.
+ // Power VS service is a container for all Power VS instances at a specific geographic region.
+ // serviceInstance can be created via IBM Cloud catalog or CLI.
+ // supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.
+ // More detail about Power VS service instance.
+ // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+ // +kubebuilder:validation:=Required
+ ServiceInstance PowerVSResource `json:"serviceInstance"`
+
+ // image is to identify the rhcos image uploaded to IBM COS bucket which is used to create the instance.
+ // supported image identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.
+ // +kubebuilder:validation:=Required
+ Image PowerVSResource `json:"image"`
+
+ // network is the reference to the Network to use for this instance.
+ // supported network identifier in PowerVSResource are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli.
+ // +kubebuilder:validation:=Required
+ Network PowerVSResource `json:"network"`
+
+ // keyPairName is the name of the KeyPair to use for SSH.
+ // The key pair will be exposed to the instance via the instance metadata service.
+ // On boot, the OS will copy the public keypair into the authorized keys for the core user.
+ // +kubebuilder:validation:=Required
+ KeyPairName string `json:"keyPairName"`
+
+ // systemType is the System type used to host the instance.
+ // systemType determines the number of cores and memory that is available.
+ // Few of the supported SystemTypes are s922,e880,e980.
+ // e880 systemType available only in Dallas Datacenters.
+ // e980 systemType available in Datacenters except Dallas and Washington.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default, which is subject to change over time. The current default is s922 which is generally available.
+ // + This is not an enum because we expect other values to be added later which should be supported implicitly.
+ // +optional
+ SystemType string `json:"systemType,omitempty"`
+
+ // processorType is the VM instance processor type.
+ // It must be set to one of the following values: Dedicated, Capped or Shared.
+ // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core.
+ // Shared: Shared among other clients.
+ // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement.
+ // if the processorType is selected as Dedicated, then processors value cannot be fractional.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default, which is subject to change over time. The current default is Shared.
+ // +kubebuilder:validation:Enum:="Dedicated";"Shared";"Capped";""
+ // +optional
+ ProcessorType PowerVSProcessorType `json:"processorType,omitempty"`
+
+ // processors is the number of virtual processors in a virtual machine.
+ // when the processorType is selected as Dedicated the processors value cannot be fractional.
+ // maximum value for the Processors depends on the selected SystemType.
+ // when SystemType is set to e880 or e980 maximum Processors value is 143.
+ // when SystemType is set to s922 maximum Processors value is 15.
+ // minimum value for Processors depends on the selected ProcessorType.
+ // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5.
+ // when ProcessorType is set as Dedicated, The minimum processors is 1.
+ // When omitted, this means that the user has no opinion and the platform is left to choose a
+ // reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType.
+ // when ProcessorType selected as Dedicated, the default is set to 1.
+ // when ProcessorType selected as Shared or Capped, the default is set to 0.5.
+ // +optional
+ Processors intstr.IntOrString `json:"processors,omitempty"`
+
+ // memoryGiB is the size of a virtual machine's memory, in GiB.
+ // maximum value for the MemoryGiB depends on the selected SystemType.
+ // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB.
+ // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB.
+ // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB.
+ // The minimum memory is 32 GiB.
+ // When omitted, this means the user has no opinion and the platform is left to choose a reasonable
+ // default, which is subject to change over time. The current default is 32.
+ // +optional
+ MemoryGiB int32 `json:"memoryGiB,omitempty"`
+
+ // loadBalancers is the set of load balancers to which the new control plane instance
+ // should be added once it is created.
+ // +optional
+ LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"`
+}
+
+// PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx
+// Only one of ID, Name or RegEx may be specified. Specifying more than one will result in
+// a validation error.
+// +union
+type PowerVSResource struct {
+ // Type identifies the resource type for this entry.
+ // Valid values are ID, Name and RegEx
+ // +kubebuilder:validation:Enum:=ID;Name;RegEx
+ // +optional
+ Type PowerVSResourceType `json:"type,omitempty"`
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+ // Name of resource
+ // +optional
+ Name *string `json:"name,omitempty"`
+ // Regex to find resource
+ // Regex contains the pattern to match to find a resource
+ // +optional
+ RegEx *string `json:"regex,omitempty"`
+}
+
+// PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains PowerVS-specific status information.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PowerVSMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // instanceId is the instance ID of the machine created in PowerVS
+ // instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service.
+ // This will help in updating or deleting a VM in Power VS Cloud
+ // +optional
+ InstanceID *string `json:"instanceId,omitempty"`
+
+ // serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created.
+ // serviceInstanceID uniquely identifies the Power VS service
+ // By setting serviceInstanceID it will become easy and efficient to fetch a server instance(VM) within Power VS Cloud.
+ // +optional
+ ServiceInstanceID *string `json:"serviceInstanceID,omitempty"`
+
+ // instanceState is the state of the PowerVS instance for this machine
+ // Possible instance states are Active, Build, ShutOff, Reboot
+ // This is used to display additional information to user regarding instance current state
+ // +optional
+ InstanceState *string `json:"instanceState,omitempty"`
+}
+
+// PowerVSSecretReference contains enough information to locate the
+// referenced secret inside the same namespace.
+// +structType=atomic
+type PowerVSSecretReference struct {
+ // Name of the secret.
+ // +optional
+ Name string `json:"name,omitempty"`
+}
+
+// LoadBalancerReference is a reference to a load balancer on IBM Cloud virtual private cloud(VPC).
+type LoadBalancerReference struct {
+ // name of the LoadBalancer in IBM Cloud VPC.
+ // The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only.
+ // The value must not end with a hyphen.
+ // It is a reference to existing LoadBalancer created by openshift installer component.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$`
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ Name string `json:"name"`
+ // type of the LoadBalancer service supported by IBM Cloud VPC.
+ // Currently, only Application LoadBalancer is supported.
+ // More details about Application LoadBalancer
+ // https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui
+ // Supported values are Application.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum:="Application"
+ Type IBMVPCLoadBalancerType `json:"type"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ffd8e951f2
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go
@@ -0,0 +1,993 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSFailureDomain) DeepCopyInto(out *AWSFailureDomain) {
+ *out = *in
+ if in.Subnet != nil {
+ in, out := &in.Subnet, &out.Subnet
+ *out = new(AWSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ out.Placement = in.Placement
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFailureDomain.
+func (in *AWSFailureDomain) DeepCopy() *AWSFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSFailureDomainPlacement) DeepCopyInto(out *AWSFailureDomainPlacement) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFailureDomainPlacement.
+func (in *AWSFailureDomainPlacement) DeepCopy() *AWSFailureDomainPlacement {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSFailureDomainPlacement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceFilter) DeepCopyInto(out *AWSResourceFilter) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceFilter.
+func (in *AWSResourceFilter) DeepCopy() *AWSResourceFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.ARN != nil {
+ in, out := &in.ARN, &out.ARN
+ *out = new(string)
+ **out = **in
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = new([]AWSResourceFilter)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]AWSResourceFilter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
+func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudMachineProviderConfig) DeepCopyInto(out *AlibabaCloudMachineProviderConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.DataDisks != nil {
+ in, out := &in.DataDisks, &out.DataDisks
+ *out = make([]DataDiskProperties, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]AlibabaResourceReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.Bandwidth = in.Bandwidth
+ out.SystemDisk = in.SystemDisk
+ in.VSwitch.DeepCopyInto(&out.VSwitch)
+ in.ResourceGroup.DeepCopyInto(&out.ResourceGroup)
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]Tag, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfig.
+func (in *AlibabaCloudMachineProviderConfig) DeepCopy() *AlibabaCloudMachineProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudMachineProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlibabaCloudMachineProviderConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudMachineProviderConfigList) DeepCopyInto(out *AlibabaCloudMachineProviderConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AlibabaCloudMachineProviderConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfigList.
+func (in *AlibabaCloudMachineProviderConfigList) DeepCopy() *AlibabaCloudMachineProviderConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudMachineProviderConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlibabaCloudMachineProviderConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaCloudMachineProviderStatus) DeepCopyInto(out *AlibabaCloudMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.InstanceID != nil {
+ in, out := &in.InstanceID, &out.InstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.InstanceState != nil {
+ in, out := &in.InstanceState, &out.InstanceState
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderStatus.
+func (in *AlibabaCloudMachineProviderStatus) DeepCopy() *AlibabaCloudMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaCloudMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlibabaCloudMachineProviderStatus) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlibabaResourceReference) DeepCopyInto(out *AlibabaResourceReference) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = new([]Tag)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]Tag, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaResourceReference.
+func (in *AlibabaResourceReference) DeepCopy() *AlibabaResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(AlibabaResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureFailureDomain) DeepCopyInto(out *AzureFailureDomain) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFailureDomain.
+func (in *AzureFailureDomain) DeepCopy() *AzureFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BandwidthProperties) DeepCopyInto(out *BandwidthProperties) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthProperties.
+func (in *BandwidthProperties) DeepCopy() *BandwidthProperties {
+ if in == nil {
+ return nil
+ }
+ out := new(BandwidthProperties)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSet) DeepCopyInto(out *ControlPlaneMachineSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSet.
+func (in *ControlPlaneMachineSet) DeepCopy() *ControlPlaneMachineSet {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlaneMachineSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetList) DeepCopyInto(out *ControlPlaneMachineSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ // Shadowed in/out point at the Items slices for the element-wise deep copy below.
+ in, out := &in.Items, &out.Items
+ *out = make([]ControlPlaneMachineSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetList.
+func (in *ControlPlaneMachineSetList) DeepCopy() *ControlPlaneMachineSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControlPlaneMachineSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetSpec) DeepCopyInto(out *ControlPlaneMachineSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ out.Strategy = in.Strategy
+ in.Selector.DeepCopyInto(&out.Selector)
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetSpec.
+func (in *ControlPlaneMachineSetSpec) DeepCopy() *ControlPlaneMachineSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetStatus) DeepCopyInto(out *ControlPlaneMachineSetStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetStatus.
+func (in *ControlPlaneMachineSetStatus) DeepCopy() *ControlPlaneMachineSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetStrategy) DeepCopyInto(out *ControlPlaneMachineSetStrategy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetStrategy.
+func (in *ControlPlaneMachineSetStrategy) DeepCopy() *ControlPlaneMachineSetStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetTemplate) DeepCopyInto(out *ControlPlaneMachineSetTemplate) {
+ *out = *in
+ if in.OpenShiftMachineV1Beta1Machine != nil {
+ // OpenShiftMachineV1Beta1Machine is a pointer field: allocate a fresh template, then deep-copy the pointee.
+ in, out := &in.OpenShiftMachineV1Beta1Machine, &out.OpenShiftMachineV1Beta1Machine
+ *out = new(OpenShiftMachineV1Beta1MachineTemplate)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetTemplate.
+func (in *ControlPlaneMachineSetTemplate) DeepCopy() *ControlPlaneMachineSetTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControlPlaneMachineSetTemplateObjectMeta) DeepCopyInto(out *ControlPlaneMachineSetTemplateObjectMeta) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetTemplateObjectMeta.
+func (in *ControlPlaneMachineSetTemplateObjectMeta) DeepCopy() *ControlPlaneMachineSetTemplateObjectMeta {
+ if in == nil {
+ return nil
+ }
+ out := new(ControlPlaneMachineSetTemplateObjectMeta)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataDiskProperties) DeepCopyInto(out *DataDiskProperties) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskProperties.
+func (in *DataDiskProperties) DeepCopy() *DataDiskProperties {
+ if in == nil {
+ return nil
+ }
+ out := new(DataDiskProperties)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FailureDomains) DeepCopyInto(out *FailureDomains) {
+ *out = *in
+ // AWS, Azure and GCP are pointers to slices: the outer pointer is allocated first,
+ // then the slice it references is copied (element-wise where elements need deep copy).
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new([]AWSFailureDomain)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]AWSFailureDomain, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new([]AzureFailureDomain)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]AzureFailureDomain, len(*in))
+ copy(*out, *in)
+ }
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new([]GCPFailureDomain)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]GCPFailureDomain, len(*in))
+ copy(*out, *in)
+ }
+ }
+ if in.VSphere != nil {
+ in, out := &in.VSphere, &out.VSphere
+ *out = make([]VSphereFailureDomain, len(*in))
+ copy(*out, *in)
+ }
+ if in.OpenStack != nil {
+ in, out := &in.OpenStack, &out.OpenStack
+ *out = make([]OpenStackFailureDomain, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Nutanix != nil {
+ in, out := &in.Nutanix, &out.Nutanix
+ *out = make([]NutanixFailureDomainReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDomains.
+func (in *FailureDomains) DeepCopy() *FailureDomains {
+ if in == nil {
+ return nil
+ }
+ out := new(FailureDomains)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPFailureDomain) DeepCopyInto(out *GCPFailureDomain) {
+ // A plain struct assignment suffices; no per-field deep copying was generated for this type.
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPFailureDomain.
+func (in *GCPFailureDomain) DeepCopy() *GCPFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference.
+func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancerReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixCategory) DeepCopyInto(out *NutanixCategory) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixCategory.
+func (in *NutanixCategory) DeepCopy() *NutanixCategory {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixCategory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixFailureDomainReference) DeepCopyInto(out *NutanixFailureDomainReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixFailureDomainReference.
+func (in *NutanixFailureDomainReference) DeepCopy() *NutanixFailureDomainReference {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixFailureDomainReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixMachineProviderConfig) DeepCopyInto(out *NutanixMachineProviderConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Cluster.DeepCopyInto(&out.Cluster)
+ in.Image.DeepCopyInto(&out.Image)
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make([]NutanixResourceIdentifier, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ // MemorySize and SystemDiskSize are copied via their value-returning DeepCopy methods.
+ out.MemorySize = in.MemorySize.DeepCopy()
+ out.SystemDiskSize = in.SystemDiskSize.DeepCopy()
+ in.Project.DeepCopyInto(&out.Project)
+ if in.Categories != nil {
+ in, out := &in.Categories, &out.Categories
+ *out = make([]NutanixCategory, len(*in))
+ copy(*out, *in)
+ }
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.FailureDomain != nil {
+ in, out := &in.FailureDomain, &out.FailureDomain
+ *out = new(NutanixFailureDomainReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderConfig.
+func (in *NutanixMachineProviderConfig) DeepCopy() *NutanixMachineProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixMachineProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NutanixMachineProviderConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixMachineProviderStatus) DeepCopyInto(out *NutanixMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.VmUUID != nil {
+ in, out := &in.VmUUID, &out.VmUUID
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderStatus.
+func (in *NutanixMachineProviderStatus) DeepCopy() *NutanixMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NutanixMachineProviderStatus) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) {
+ *out = *in
+ // UUID and Name are optional string pointers; each is re-allocated so the copy shares no memory.
+ if in.UUID != nil {
+ in, out := &in.UUID, &out.UUID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier.
+func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier {
+ if in == nil {
+ return nil
+ }
+ out := new(NutanixResourceIdentifier)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopyInto(out *OpenShiftMachineV1Beta1MachineTemplate) {
+ *out = *in
+ if in.FailureDomains != nil {
+ in, out := &in.FailureDomains, &out.FailureDomains
+ *out = new(FailureDomains)
+ (*in).DeepCopyInto(*out)
+ }
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftMachineV1Beta1MachineTemplate.
+func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopy() *OpenShiftMachineV1Beta1MachineTemplate {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftMachineV1Beta1MachineTemplate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackFailureDomain) DeepCopyInto(out *OpenStackFailureDomain) {
+ *out = *in
+ if in.RootVolume != nil {
+ in, out := &in.RootVolume, &out.RootVolume
+ *out = new(RootVolume)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFailureDomain.
+func (in *OpenStackFailureDomain) DeepCopy() *OpenStackFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSMachineProviderConfig) DeepCopyInto(out *PowerVSMachineProviderConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(PowerVSSecretReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(PowerVSSecretReference)
+ **out = **in
+ }
+ in.ServiceInstance.DeepCopyInto(&out.ServiceInstance)
+ in.Image.DeepCopyInto(&out.Image)
+ in.Network.DeepCopyInto(&out.Network)
+ out.Processors = in.Processors
+ if in.LoadBalancers != nil {
+ in, out := &in.LoadBalancers, &out.LoadBalancers
+ *out = make([]LoadBalancerReference, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderConfig.
+func (in *PowerVSMachineProviderConfig) DeepCopy() *PowerVSMachineProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSMachineProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PowerVSMachineProviderConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSMachineProviderStatus) DeepCopyInto(out *PowerVSMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ // Optional string pointers are re-allocated so the copy shares no memory with the receiver.
+ if in.InstanceID != nil {
+ in, out := &in.InstanceID, &out.InstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.ServiceInstanceID != nil {
+ in, out := &in.ServiceInstanceID, &out.ServiceInstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.InstanceState != nil {
+ in, out := &in.InstanceState, &out.InstanceState
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderStatus.
+func (in *PowerVSMachineProviderStatus) DeepCopy() *PowerVSMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PowerVSMachineProviderStatus) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSResource) DeepCopyInto(out *PowerVSResource) {
+ *out = *in
+ // ID, Name and RegEx are optional string pointers; each is re-allocated and copied.
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.RegEx != nil {
+ in, out := &in.RegEx, &out.RegEx
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResource.
+func (in *PowerVSResource) DeepCopy() *PowerVSResource {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PowerVSSecretReference) DeepCopyInto(out *PowerVSSecretReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSSecretReference.
+func (in *PowerVSSecretReference) DeepCopy() *PowerVSSecretReference {
+ if in == nil {
+ return nil
+ }
+ out := new(PowerVSSecretReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootVolume) DeepCopyInto(out *RootVolume) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume.
+func (in *RootVolume) DeepCopy() *RootVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(RootVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SystemDiskProperties) DeepCopyInto(out *SystemDiskProperties) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemDiskProperties.
+func (in *SystemDiskProperties) DeepCopy() *SystemDiskProperties {
+ if in == nil {
+ return nil
+ }
+ out := new(SystemDiskProperties)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Tag) DeepCopyInto(out *Tag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag.
+func (in *Tag) DeepCopy() *Tag {
+ if in == nil {
+ return nil
+ }
+ out := new(Tag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereFailureDomain) DeepCopyInto(out *VSphereFailureDomain) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomain.
+func (in *VSphereFailureDomain) DeepCopy() *VSphereFailureDomain {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereFailureDomain)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..258caa113b
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,51 @@
+controlplanemachinesets.machine.openshift.io:
+ Annotations:
+ exclude.release.openshift.io/internal-openshift-hosted: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1112
+ CRDName: controlplanemachinesets.machine.openshift.io
+ Capability: MachineAPI
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: control-plane-machine-set
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: machine.openshift.io
+ HasStatus: true
+ KindName: ControlPlaneMachineSet
+ Labels: {}
+ PluralName: controlplanemachinesets
+ PrinterColumns:
+ - description: Desired Replicas
+ jsonPath: .spec.replicas
+ name: Desired
+ type: integer
+ - description: Current Replicas
+ jsonPath: .status.replicas
+ name: Current
+ type: integer
+ - description: Ready Replicas
+ jsonPath: .status.readyReplicas
+ name: Ready
+ type: integer
+ - description: Updated Replicas
+ jsonPath: .status.updatedReplicas
+ name: Updated
+ type: integer
+ - description: Observed number of unavailable replicas
+ jsonPath: .status.unavailableReplicas
+ name: Unavailable
+ type: integer
+ - description: ControlPlaneMachineSet state
+ jsonPath: .spec.state
+ name: State
+ type: string
+ - description: ControlPlaneMachineSet age
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..44fed0c1e1
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,433 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AlibabaCloudMachineProviderConfig = map[string]string{
+ "": "AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "instanceType": "The instance type of the instance.",
+ "vpcId": "The ID of the vpc",
+ "regionId": "The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list.",
+ "zoneId": "The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list.",
+ "imageId": "The ID of the image used to create the instance.",
+ "dataDisk": "DataDisks holds information regarding the extra disks attached to the instance",
+ "securityGroups": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm",
+ "bandwidth": "Bandwidth describes the internet bandwidth strategy for the instance",
+ "systemDisk": "SystemDisk holds the properties regarding the system disk for the instance",
+ "vSwitch": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.",
+ "ramRoleName": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.",
+ "resourceGroup": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.",
+ "tenancy": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.",
+ "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+ "credentialsSecret": "CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.",
+ "tag": "Tags are the set of metadata to add to an instance.",
+}
+
+func (AlibabaCloudMachineProviderConfig) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudMachineProviderConfig
+}
+
+var map_AlibabaCloudMachineProviderConfigList = map[string]string{
+ "": "AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AlibabaCloudMachineProviderConfigList) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudMachineProviderConfigList
+}
+
+var map_AlibabaCloudMachineProviderStatus = map[string]string{
+ "": "AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "instanceId": "InstanceID is the instance ID of the machine created in alibabacloud",
+ "instanceState": "InstanceState is the state of the alibabacloud instance for this machine",
+ "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+}
+
+func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_AlibabaCloudMachineProviderStatus
+}
+
+var map_AlibabaResourceReference = map[string]string{
+ "": "ResourceTagReference is a reference to a specific AlibabaCloud resource by ID, or tags. Only one of ID or Tags may be specified. Specifying more than one will result in a validation error.",
+ "type": "type identifies the resource reference type for this entry.",
+ "id": "ID of resource",
+ "name": "Name of the resource",
+ "tags": "Tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.",
+}
+
+func (AlibabaResourceReference) SwaggerDoc() map[string]string {
+ return map_AlibabaResourceReference
+}
+
+var map_BandwidthProperties = map[string]string{
+ "": "Bandwidth describes the bandwidth strategy for the network of the instance",
+ "internetMaxBandwidthIn": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.",
+ "internetMaxBandwidthOut": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`",
+}
+
+func (BandwidthProperties) SwaggerDoc() map[string]string {
+ return map_BandwidthProperties
+}
+
+var map_DataDiskProperties = map[string]string{
+ "": "DataDisk contains the information regarding the datadisk attached to an instance",
+ "Name": "Name is the name of data disk N. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.",
+ "SnapshotID": "SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.\n\nWhen the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot. Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.",
+ "Size": "Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:\n\nValid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud: 5 to 2000 The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.",
+ "DiskEncryption": "DiskEncryption specifies whether to encrypt data disk N.\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `disabled`.",
+ "PerformanceLevel": "PerformanceLevel is the performance level of the ESSD used as as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. For more information about ESSD performance levels, see ESSDs.",
+ "Category": "Category describes the type of data disk N. Valid values: cloud_efficiency: ultra disk cloud_ssd: standard SSD cloud_essd: ESSD cloud: basic disk Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.",
+ "KMSKeyID": "KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `\"\"` which is interpreted as do not use KMSKey encryption.",
+ "DiskPreservation": "DiskPreservation specifies whether to release data disk N along with the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `DeleteWithInstance`",
+}
+
+func (DataDiskProperties) SwaggerDoc() map[string]string {
+ return map_DataDiskProperties
+}
+
+var map_SystemDiskProperties = map[string]string{
+ "": "SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category",
+ "category": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.",
+ "performanceLevel": "PerformanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.",
+ "name": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.",
+ "size": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.",
+}
+
+func (SystemDiskProperties) SwaggerDoc() map[string]string {
+ return map_SystemDiskProperties
+}
+
+var map_Tag = map[string]string{
+ "": "Tag The tags of ECS Instance",
+ "Key": "Key is the name of the key pair",
+ "Value": "Value is the value or data of the key pair",
+}
+
+func (Tag) SwaggerDoc() map[string]string {
+ return map_Tag
+}
+
+var map_AWSResourceFilter = map[string]string{
+ "": "AWSResourceFilter is a filter used to identify an AWS resource",
+ "name": "Name of the filter. Filter names are case-sensitive.",
+ "values": "Values includes one or more filter values. Filter values are case-sensitive.",
+}
+
+func (AWSResourceFilter) SwaggerDoc() map[string]string {
+ return map_AWSResourceFilter
+}
+
+var map_AWSResourceReference = map[string]string{
+ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.",
+ "type": "Type determines how the reference will fetch the AWS resource.",
+ "id": "ID of resource.",
+ "arn": "ARN of resource.",
+ "filters": "Filters is a set of filters used to identify a resource.",
+}
+
+func (AWSResourceReference) SwaggerDoc() map[string]string {
+ return map_AWSResourceReference
+}
+
+var map_AWSFailureDomain = map[string]string{
+ "": "AWSFailureDomain configures failure domain information for the AWS platform.",
+ "subnet": "Subnet is a reference to the subnet to use for this instance.",
+ "placement": "Placement configures the placement information for this instance.",
+}
+
+func (AWSFailureDomain) SwaggerDoc() map[string]string {
+ return map_AWSFailureDomain
+}
+
+var map_AWSFailureDomainPlacement = map[string]string{
+ "": "AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.",
+ "availabilityZone": "AvailabilityZone is the availability zone of the instance.",
+}
+
+func (AWSFailureDomainPlacement) SwaggerDoc() map[string]string {
+ return map_AWSFailureDomainPlacement
+}
+
+var map_AzureFailureDomain = map[string]string{
+ "": "AzureFailureDomain configures failure domain information for the Azure platform.",
+ "zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone.",
+ "subnet": "subnet is the name of the network subnet in which the VM will be created. When omitted, the subnet value from the machine providerSpec template will be used.",
+}
+
+func (AzureFailureDomain) SwaggerDoc() map[string]string {
+ return map_AzureFailureDomain
+}
+
+var map_ControlPlaneMachineSet = map[string]string{
+ "": "ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ControlPlaneMachineSet) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSet
+}
+
+var map_ControlPlaneMachineSetList = map[string]string{
+ "": "ControlPlaneMachineSetList contains a list of ControlPlaneMachineSet Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ControlPlaneMachineSetList) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetList
+}
+
+var map_ControlPlaneMachineSetSpec = map[string]string{
+ "": "ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet.",
+ "state": "State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.",
+ "replicas": "Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.",
+ "strategy": "Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.",
+ "selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource.",
+ "template": "Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.",
+}
+
+func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetSpec
+}
+
+var map_ControlPlaneMachineSetStatus = map[string]string{
+ "": "ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.",
+ "conditions": "Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.",
+ "observedGeneration": "ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.",
+ "replicas": "Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.",
+ "readyReplicas": "ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.",
+ "updatedReplicas": "UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.",
+ "unavailableReplicas": "UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.",
+}
+
+func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetStatus
+}
+
+var map_ControlPlaneMachineSetStrategy = map[string]string{
+ "": "ControlPlaneMachineSetStrategy defines the strategy for applying updates to the Control Plane Machines managed by the ControlPlaneMachineSet.",
+ "type": "Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".",
+}
+
+func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetStrategy
+}
+
+var map_ControlPlaneMachineSetTemplate = map[string]string{
+ "": "ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet to create the Machines that it will manage in the future. ",
+ "machineType": "MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.",
+ "machines_v1beta1_machine_openshift_io": "OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group.",
+}
+
+func (ControlPlaneMachineSetTemplate) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetTemplate
+}
+
+var map_ControlPlaneMachineSetTemplateObjectMeta = map[string]string{
+ "": "ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct. It allows users to specify labels and annotations that will be copied onto Machines created from this template.",
+ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'.",
+ "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+}
+
+func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string {
+ return map_ControlPlaneMachineSetTemplateObjectMeta
+}
+
+var map_FailureDomains = map[string]string{
+ "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.",
+ "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.",
+ "aws": "AWS configures failure domain information for the AWS platform.",
+ "azure": "Azure configures failure domain information for the Azure platform.",
+ "gcp": "GCP configures failure domain information for the GCP platform.",
+ "vsphere": "vsphere configures failure domain information for the VSphere platform.",
+ "openstack": "OpenStack configures failure domain information for the OpenStack platform.",
+ "nutanix": "nutanix configures failure domain information for the Nutanix platform.",
+}
+
+func (FailureDomains) SwaggerDoc() map[string]string {
+ return map_FailureDomains
+}
+
+var map_GCPFailureDomain = map[string]string{
+ "": "GCPFailureDomain configures failure domain information for the GCP platform",
+ "zone": "Zone is the zone in which the GCP machine provider will create the VM.",
+}
+
+func (GCPFailureDomain) SwaggerDoc() map[string]string {
+ return map_GCPFailureDomain
+}
+
+var map_NutanixFailureDomainReference = map[string]string{
+ "": "NutanixFailureDomainReference refers to the failure domain of the Nutanix platform.",
+ "name": "name of the failure domain in which the nutanix machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource.",
+}
+
+func (NutanixFailureDomainReference) SwaggerDoc() map[string]string {
+ return map_NutanixFailureDomainReference
+}
+
+var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{
+ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.",
+ "failureDomains": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.",
+ "metadata": "ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.",
+ "spec": "Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.",
+}
+
+func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string {
+ return map_OpenShiftMachineV1Beta1MachineTemplate
+}
+
+var map_OpenStackFailureDomain = map[string]string{
+ "": "OpenStackFailureDomain configures failure domain information for the OpenStack platform.",
+ "availabilityZone": "availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces otherwise it will lead to node that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.",
+ "rootVolume": "rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created.",
+}
+
+func (OpenStackFailureDomain) SwaggerDoc() map[string]string {
+ return map_OpenStackFailureDomain
+}
+
+var map_RootVolume = map[string]string{
+ "": "RootVolume represents the volume metadata to boot from. The original RootVolume struct is defined in the v1alpha1 but it's not best practice to use it directly here so we define a new one that should stay in sync with the original one.",
+ "availabilityZone": "availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specifified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces otherwise it will lead to volume that belongs to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.",
+ "volumeType": "volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. ",
+}
+
+func (RootVolume) SwaggerDoc() map[string]string {
+ return map_RootVolume
+}
+
+var map_VSphereFailureDomain = map[string]string{
+ "": "VSphereFailureDomain configures failure domain information for the vSphere platform",
+ "name": "name of the failure domain in which the vSphere machine provider will create the VM. Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. When balancing machines across failure domains, the control plane machine set will inject configuration from the Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain.",
+}
+
+func (VSphereFailureDomain) SwaggerDoc() map[string]string {
+ return map_VSphereFailureDomain
+}
+
+var map_NutanixCategory = map[string]string{
+ "": "NutanixCategory identifies a pair of prism category key and value",
+ "key": "key is the prism category key name",
+ "value": "value is the prism category value associated with the key",
+}
+
+func (NutanixCategory) SwaggerDoc() map[string]string {
+ return map_NutanixCategory
+}
+
+var map_NutanixMachineProviderConfig = map[string]string{
+ "": "NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+ "image": "image is to identify the rhcos image uploaded to the Prism Central (PC) The image identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+ "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+ "vcpusPerSocket": "vcpusPerSocket is the number of vCPUs per socket of the VM",
+ "vcpuSockets": "vcpuSockets is the number of vCPU sockets of the VM",
+ "memorySize": "memorySize is the memory size (in Quantity format) of the VM The minimum memorySize is 2Gi bytes",
+ "systemDiskSize": "systemDiskSize is size (in Quantity format) of the system disk of the VM The minimum systemDiskSize is 20Gi bytes",
+ "bootType": "bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot. If this field is empty or omitted, the VM will use the default boot type \"Legacy\" to boot. \"SecureBoot\" depends on \"UEFI\" boot, i.e., enabling \"SecureBoot\" means that \"UEFI\" boot is also enabled.",
+ "project": "project optionally identifies a Prism project for the Machine's VM to associate with.",
+ "categories": "categories optionally adds one or more prism categories (each with key and value) for the Machine's VM to associate with. All the category key and value pairs specified must already exist in the prism central.",
+ "userDataSecret": "userDataSecret is a local reference to a secret that contains the UserData to apply to the VM",
+ "credentialsSecret": "credentialsSecret is a local reference to a secret that contains the credentials data to access Nutanix PC client",
+ "failureDomain": "failureDomain refers to the name of the FailureDomain with which this Machine is associated. If this is configured, the Nutanix machine controller will use the prism_central endpoint and credentials defined in the referenced FailureDomain to communicate to the prism_central. It will also verify that the 'cluster' and subnets' configuration in the NutanixMachineProviderConfig is consistent with that in the referenced failureDomain.",
+}
+
+func (NutanixMachineProviderConfig) SwaggerDoc() map[string]string {
+ return map_NutanixMachineProviderConfig
+}
+
+var map_NutanixMachineProviderStatus = map[string]string{
+ "": "NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains nutanix-specific status information. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status",
+ "vmUUID": "vmUUID is the Machine associated VM's UUID The field is missing before the VM is created. Once the VM is created, the field is filled with the VM's UUID and it will not change. The vmUUID is used to find the VM when updating the Machine status, and to delete the VM when the Machine is deleted.",
+}
+
+func (NutanixMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_NutanixMachineProviderStatus
+}
+
+var map_NutanixResourceIdentifier = map[string]string{
+ "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)",
+ "type": "Type is the identifier type to use for this resource.",
+ "uuid": "uuid is the UUID of the resource in the PC.",
+ "name": "name is the resource name in the PC",
+}
+
+func (NutanixResourceIdentifier) SwaggerDoc() map[string]string {
+ return map_NutanixResourceIdentifier
+}
+
+var map_LoadBalancerReference = map[string]string{
+ "": "LoadBalancerReference is a reference to a load balancer on IBM Cloud virtual private cloud(VPC).",
+ "name": "name of the LoadBalancer in IBM Cloud VPC. The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only. The value must not end with a hyphen. It is a reference to existing LoadBalancer created by openshift installer component.",
+ "type": "type of the LoadBalancer service supported by IBM Cloud VPC. Currently, only Application LoadBalancer is supported. More details about Application LoadBalancer https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui Supported values are Application.",
+}
+
+func (LoadBalancerReference) SwaggerDoc() map[string]string {
+ return map_LoadBalancerReference
+}
+
+var map_PowerVSMachineProviderConfig = map[string]string{
+ "": "PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field for a PowerVS virtual machine. It is used by the PowerVS machine actuator to create a single Machine.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance.",
+ "credentialsSecret": "credentialsSecret is a reference to the secret with IBM Cloud credentials.",
+ "serviceInstance": "serviceInstance is the reference to the Power VS service on which the server instance(VM) will be created. Power VS service is a container for all Power VS instances at a specific geographic region. serviceInstance can be created via IBM Cloud catalog or CLI. supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. More detail about Power VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server",
+ "image": "image is to identify the rhcos image uploaded to IBM COS bucket which is used to create the instance. supported image identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.",
+ "network": "network is the reference to the Network to use for this instance. supported network identifier in PowerVSResource are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli.",
+ "keyPairName": "keyPairName is the name of the KeyPair to use for SSH. The key pair will be exposed to the instance via the instance metadata service. On boot, the OS will copy the public keypair into the authorized keys for the core user.",
+ "systemType": "systemType is the System type used to host the instance. systemType determines the number of cores and memory that is available. Few of the supported SystemTypes are s922,e880,e980. e880 systemType available only in Dallas Datacenters. e980 systemType available in Datacenters except Dallas and Washington. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is s922 which is generally available.",
+ "processorType": "processorType is the VM instance processor type. It must be set to one of the following values: Dedicated, Capped or Shared. Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. Shared: Shared among other clients. Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. if the processorType is selected as Dedicated, then processors value cannot be fractional. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Shared.",
+ "processors": "processors is the number of virtual processors in a virtual machine. when the processorType is selected as Dedicated the processors value cannot be fractional. maximum value for the Processors depends on the selected SystemType. when SystemType is set to e880 or e980 maximum Processors value is 143. when SystemType is set to s922 maximum Processors value is 15. minimum value for Processors depends on the selected ProcessorType. when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. when ProcessorType is set as Dedicated, The minimum processors is 1. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. when ProcessorType selected as Dedicated, the default is set to 1. when ProcessorType selected as Shared or Capped, the default is set to 0.5.",
+ "memoryGiB": "memoryGiB is the size of a virtual machine's memory, in GiB. maximum value for the MemoryGiB depends on the selected SystemType. when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. The minimum memory is 32 GiB. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 32.",
+ "loadBalancers": "loadBalancers is the set of load balancers to which the new control plane instance should be added once it is created.",
+}
+
+func (PowerVSMachineProviderConfig) SwaggerDoc() map[string]string {
+ return map_PowerVSMachineProviderConfig
+}
+
+var map_PowerVSMachineProviderStatus = map[string]string{
+ "": "PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains PowerVS-specific status information.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status",
+ "instanceId": "instanceId is the instance ID of the machine created in PowerVS instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. This will help in updating or deleting a VM in Power VS Cloud",
+ "serviceInstanceID": "serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created. serviceInstanceID uniquely identifies the Power VS service By setting serviceInstanceID it will become easy and efficient to fetch a server instance(VM) within Power VS Cloud.",
+ "instanceState": "instanceState is the state of the PowerVS instance for this machine Possible instance states are Active, Build, ShutOff, Reboot This is used to display additional information to user regarding instance current state",
+}
+
+func (PowerVSMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_PowerVSMachineProviderStatus
+}
+
+var map_PowerVSResource = map[string]string{
+ "": "PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx Only one of ID, Name or RegEx may be specified. Specifying more than one will result in a validation error.",
+ "type": "Type identifies the resource type for this entry. Valid values are ID, Name and RegEx",
+ "id": "ID of resource",
+ "name": "Name of resource",
+ "regex": "Regex to find resource Regex contains the pattern to match to find a resource",
+}
+
+func (PowerVSResource) SwaggerDoc() map[string]string {
+ return map_PowerVSResource
+}
+
+var map_PowerVSSecretReference = map[string]string{
+ "": "PowerVSSecretReference contains enough information to locate the referenced secret inside the same namespace.",
+ "name": "Name of the secret.",
+}
+
+func (PowerVSSecretReference) SwaggerDoc() map[string]string {
+ return map_PowerVSSecretReference
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/doc.go b/vendor/github.com/openshift/api/machine/v1alpha1/doc.go
new file mode 100644
index 0000000000..111cacb635
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=machine.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/register.go b/vendor/github.com/openshift/api/machine/v1alpha1/register.go
new file mode 100644
index 0000000000..ef96c4720a
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/register.go
@@ -0,0 +1,38 @@
+/*
+ Copyright 2022 Red Hat, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "machine.openshift.io"
+
+var (
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go
new file mode 100644
index 0000000000..da5fbc5152
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go
@@ -0,0 +1,439 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance.
+// +k8s:openapi-gen=true
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type OpenstackProviderSpec struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // The name of the secret containing the openstack credentials
+ CloudsSecret *corev1.SecretReference `json:"cloudsSecret"`
+
+ // The name of the cloud to use from the clouds secret
+ CloudName string `json:"cloudName"`
+
+ // The flavor reference for the flavor for your server instance.
+ Flavor string `json:"flavor"`
+
+ // The name of the image to use for your server instance.
+ // If the RootVolume is specified, this will be ignored and use rootVolume directly.
+ Image string `json:"image"`
+
+ // The ssh key to inject in the instance
+ KeyName string `json:"keyName,omitempty"`
+
+ // The machine ssh username
+ SshUserName string `json:"sshUserName,omitempty"`
+
+ // A networks object. Required parameter when there are multiple networks defined for the tenant.
+ // When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.
+ Networks []NetworkParam `json:"networks,omitempty"`
+
+ // Create and assign additional ports to instances
+ Ports []PortOpts `json:"ports,omitempty"`
+
+ // floatingIP specifies a floating IP to be associated with the machine.
+ // Note that it is not safe to use this parameter in a MachineSet, as
+ // only one Machine may be assigned the same floating IP.
+ //
+ // Deprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.
+ FloatingIP string `json:"floatingIP,omitempty"`
+
+ // The availability zone from which to launch the server.
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+ // The names of the security groups to assign to the instance
+ SecurityGroups []SecurityGroupParam `json:"securityGroups,omitempty"`
+
+ // The name of the secret containing the user data (startup script in most cases)
+ UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"`
+
+ // Whether the server instance is created on a trunk port or not.
+ Trunk bool `json:"trunk,omitempty"`
+
+ // Machine tags
+ // Requires Nova api 2.52 minimum!
+ Tags []string `json:"tags,omitempty"`
+
+ // Metadata mapping. Allows you to create a map of key value pairs to add to the server instance.
+ ServerMetadata map[string]string `json:"serverMetadata,omitempty"`
+
+ // Config Drive support
+ ConfigDrive *bool `json:"configDrive,omitempty"`
+
+ // The volume metadata to boot from
+ RootVolume *RootVolume `json:"rootVolume,omitempty"`
+
+ // additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance
+ // +optional
+ // +listType=map
+ // +listMapKey=name
+ AdditionalBlockDevices []AdditionalBlockDevice `json:"additionalBlockDevices,omitempty"`
+
+ // The server group to assign the machine to.
+ ServerGroupID string `json:"serverGroupID,omitempty"`
+
+ // The server group to assign the machine to. A server group with that
+ // name will be created if it does not exist. If both ServerGroupID and
+ // ServerGroupName are non-empty, they must refer to the same OpenStack
+ // resource.
+ ServerGroupName string `json:"serverGroupName,omitempty"`
+
+ // The subnet that a set of machines will get ingress/egress traffic from
+ PrimarySubnet string `json:"primarySubnet,omitempty"`
+}
+
+type SecurityGroupParam struct {
+ // Security Group UUID
+ UUID string `json:"uuid,omitempty"`
+ // Security Group name
+ Name string `json:"name,omitempty"`
+ // Filters used to query security groups in openstack
+ Filter SecurityGroupFilter `json:"filter,omitempty"`
+}
+
+type SecurityGroupFilter struct {
+ // id specifies the ID of a security group to use. If set, id will not
+ // be validated before use. An invalid id will result in failure to
+ // create a server with an appropriate error message.
+ ID string `json:"id,omitempty"`
+ // name filters security groups by name.
+ Name string `json:"name,omitempty"`
+ // description filters security groups by description.
+ Description string `json:"description,omitempty"`
+ // tenantId filters security groups by tenant ID.
+ // Deprecated: use projectId instead. tenantId will be ignored if projectId is set.
+ TenantID string `json:"tenantId,omitempty"`
+ // projectId filters security groups by project ID.
+ ProjectID string `json:"projectId,omitempty"`
+ // tags filters by security groups containing all specified tags.
+ // Multiple tags are comma separated.
+ Tags string `json:"tags,omitempty"`
+ // tagsAny filters by security groups containing any specified tags.
+ // Multiple tags are comma separated.
+ TagsAny string `json:"tagsAny,omitempty"`
+ // notTags filters by security groups which don't match all specified tags. NOT (t1 AND t2...)
+ // Multiple tags are comma separated.
+ NotTags string `json:"notTags,omitempty"`
+ // notTagsAny filters by security groups which don't match any specified tags. NOT (t1 OR t2...)
+ // Multiple tags are comma separated.
+ NotTagsAny string `json:"notTagsAny,omitempty"`
+
+ // Deprecated: limit is silently ignored. It has no replacement.
+ DeprecatedLimit int `json:"limit,omitempty"`
+ // Deprecated: marker is silently ignored. It has no replacement.
+ DeprecatedMarker string `json:"marker,omitempty"`
+ // Deprecated: sortKey is silently ignored. It has no replacement.
+ DeprecatedSortKey string `json:"sortKey,omitempty"`
+ // Deprecated: sortDir is silently ignored. It has no replacement.
+ DeprecatedSortDir string `json:"sortDir,omitempty"`
+}
+
+type NetworkParam struct {
+ // The UUID of the network. Required if you omit the port attribute.
+ UUID string `json:"uuid,omitempty"`
+ // A fixed IPv4 address for the NIC.
+ FixedIp string `json:"fixedIp,omitempty"`
+ // Filters for optional network query
+ Filter Filter `json:"filter,omitempty"`
+ // Subnet within a network to use
+ Subnets []SubnetParam `json:"subnets,omitempty"`
+ // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports
+ NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"`
+ // PortTags allows users to specify a list of tags to add to ports created in a given network
+ PortTags []string `json:"portTags,omitempty"`
+ // The virtual network interface card (vNIC) type that is bound to the
+ // neutron port.
+ VNICType string `json:"vnicType,omitempty"`
+ // A dictionary that enables the application running on the specified
+ // host to pass and receive virtual network interface (VIF) port-specific
+ // information to the plug-in.
+ Profile map[string]string `json:"profile,omitempty"`
+ // PortSecurity optionally enables or disables security on ports managed by OpenStack
+ PortSecurity *bool `json:"portSecurity,omitempty"`
+}
+
+type Filter struct {
+ // Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set.
+ ID string `json:"id,omitempty"`
+ // name filters networks by name.
+ Name string `json:"name,omitempty"`
+ // description filters networks by description.
+ Description string `json:"description,omitempty"`
+ // tenantId filters networks by tenant ID.
+ // Deprecated: use projectId instead. tenantId will be ignored if projectId is set.
+ TenantID string `json:"tenantId,omitempty"`
+ // projectId filters networks by project ID.
+ ProjectID string `json:"projectId,omitempty"`
+ // tags filters by networks containing all specified tags.
+ // Multiple tags are comma separated.
+ Tags string `json:"tags,omitempty"`
+ // tagsAny filters by networks containing any specified tags.
+ // Multiple tags are comma separated.
+ TagsAny string `json:"tagsAny,omitempty"`
+ // notTags filters by networks which don't match all specified tags. NOT (t1 AND t2...)
+ // Multiple tags are comma separated.
+ NotTags string `json:"notTags,omitempty"`
+ // notTagsAny filters by networks which don't match any specified tags. NOT (t1 OR t2...)
+ // Multiple tags are comma separated.
+ NotTagsAny string `json:"notTagsAny,omitempty"`
+
+ // Deprecated: status is silently ignored. It has no replacement.
+ DeprecatedStatus string `json:"status,omitempty"`
+ // Deprecated: adminStateUp is silently ignored. It has no replacement.
+ DeprecatedAdminStateUp *bool `json:"adminStateUp,omitempty"`
+ // Deprecated: shared is silently ignored. It has no replacement.
+ DeprecatedShared *bool `json:"shared,omitempty"`
+ // Deprecated: marker is silently ignored. It has no replacement.
+ DeprecatedMarker string `json:"marker,omitempty"`
+ // Deprecated: limit is silently ignored. It has no replacement.
+ DeprecatedLimit int `json:"limit,omitempty"`
+ // Deprecated: sortKey is silently ignored. It has no replacement.
+ DeprecatedSortKey string `json:"sortKey,omitempty"`
+ // Deprecated: sortDir is silently ignored. It has no replacement.
+ DeprecatedSortDir string `json:"sortDir,omitempty"`
+}
+
+type SubnetParam struct {
+ // The UUID of the network. Required if you omit the port attribute.
+ UUID string `json:"uuid,omitempty"`
+
+ // Filters for optional network query
+ Filter SubnetFilter `json:"filter,omitempty"`
+
+ // PortTags are tags that are added to ports created on this subnet
+ PortTags []string `json:"portTags,omitempty"`
+
+ // PortSecurity optionally enables or disables security on ports managed by OpenStack
+ PortSecurity *bool `json:"portSecurity,omitempty"`
+}
+
+type SubnetFilter struct {
+ // id is the uuid of a specific subnet to use. If specified, id will not
+ // be validated. Instead server creation will fail with an appropriate
+ // error.
+ ID string `json:"id,omitempty"`
+ // name filters subnets by name.
+ Name string `json:"name,omitempty"`
+ // description filters subnets by description.
+ Description string `json:"description,omitempty"`
+ // Deprecated: networkId is silently ignored. Set uuid on the containing network definition instead.
+ NetworkID string `json:"networkId,omitempty"`
+ // tenantId filters subnets by tenant ID.
+ // Deprecated: use projectId instead. tenantId will be ignored if projectId is set.
+ TenantID string `json:"tenantId,omitempty"`
+ // projectId filters subnets by project ID.
+ ProjectID string `json:"projectId,omitempty"`
+ // ipVersion filters subnets by IP version.
+ IPVersion int `json:"ipVersion,omitempty"`
+ // gateway_ip filters subnets by gateway IP.
+ GatewayIP string `json:"gateway_ip,omitempty"`
+ // cidr filters subnets by CIDR.
+ CIDR string `json:"cidr,omitempty"`
+ // ipv6AddressMode filters subnets by IPv6 address mode.
+ IPv6AddressMode string `json:"ipv6AddressMode,omitempty"`
+	// ipv6RaMode filters subnets by IPv6 router advertisement mode.
+ IPv6RAMode string `json:"ipv6RaMode,omitempty"`
+ // subnetpoolId filters subnets by subnet pool ID.
+ SubnetPoolID string `json:"subnetpoolId,omitempty"`
+ // tags filters by subnets containing all specified tags.
+ // Multiple tags are comma separated.
+ Tags string `json:"tags,omitempty"`
+ // tagsAny filters by subnets containing any specified tags.
+ // Multiple tags are comma separated.
+ TagsAny string `json:"tagsAny,omitempty"`
+ // notTags filters by subnets which don't match all specified tags. NOT (t1 AND t2...)
+ // Multiple tags are comma separated.
+ NotTags string `json:"notTags,omitempty"`
+ // notTagsAny filters by subnets which don't match any specified tags. NOT (t1 OR t2...)
+ // Multiple tags are comma separated.
+ NotTagsAny string `json:"notTagsAny,omitempty"`
+
+ // Deprecated: enableDhcp is silently ignored. It has no replacement.
+ DeprecatedEnableDHCP *bool `json:"enableDhcp,omitempty"`
+ // Deprecated: limit is silently ignored. It has no replacement.
+ DeprecatedLimit int `json:"limit,omitempty"`
+ // Deprecated: marker is silently ignored. It has no replacement.
+ DeprecatedMarker string `json:"marker,omitempty"`
+ // Deprecated: sortKey is silently ignored. It has no replacement.
+ DeprecatedSortKey string `json:"sortKey,omitempty"`
+ // Deprecated: sortDir is silently ignored. It has no replacement.
+ DeprecatedSortDir string `json:"sortDir,omitempty"`
+}
+
+type PortOpts struct {
+ // networkID is the ID of the network the port will be created in. It is required.
+ // +required
+ NetworkID string `json:"networkID"`
+	// If nameSuffix is specified the created port will be named <machine-name>-<nameSuffix>.
+	// If not specified the port will be named <machine-name>-<index of this port>.
+ NameSuffix string `json:"nameSuffix,omitempty"`
+ // description specifies the description of the created port.
+ Description string `json:"description,omitempty"`
+ // adminStateUp sets the administrative state of the created port to up (true), or down (false).
+ AdminStateUp *bool `json:"adminStateUp,omitempty"`
+ // macAddress specifies the MAC address of the created port.
+ MACAddress string `json:"macAddress,omitempty"`
+ // fixedIPs specifies a set of fixed IPs to assign to the port. They must all be valid for the port's network.
+ FixedIPs []FixedIPs `json:"fixedIPs,omitempty"`
+ // tenantID specifies the tenant ID of the created port. Note that this
+ // requires OpenShift to have administrative permissions, which is
+ // typically not the case. Use of this field is not recommended.
+ // Deprecated: use projectID instead. It will be ignored if projectID is set.
+ TenantID string `json:"tenantID,omitempty"`
+ // projectID specifies the project ID of the created port. Note that this
+ // requires OpenShift to have administrative permissions, which is
+ // typically not the case. Use of this field is not recommended.
+ ProjectID string `json:"projectID,omitempty"`
+ // securityGroups specifies a set of security group UUIDs to use instead
+ // of the machine's default security groups. The default security groups
+ // will be used if this is left empty or not specified.
+ SecurityGroups *[]string `json:"securityGroups,omitempty"`
+ // allowedAddressPairs specifies a set of allowed address pairs to add to the port.
+ AllowedAddressPairs []AddressPair `json:"allowedAddressPairs,omitempty"`
+	// tags specifies a set of tags to add to the port.
+ Tags []string `json:"tags,omitempty"`
+ // The virtual network interface card (vNIC) type that is bound to the
+ // neutron port.
+ VNICType string `json:"vnicType,omitempty"`
+ // A dictionary that enables the application running on the specified
+ // host to pass and receive virtual network interface (VIF) port-specific
+ // information to the plug-in.
+ Profile map[string]string `json:"profile,omitempty"`
+ // enable or disable security on a given port
+ // incompatible with securityGroups and allowedAddressPairs
+ PortSecurity *bool `json:"portSecurity,omitempty"`
+ // Enables and disables trunk at port level. If not provided, openStackMachine.Spec.Trunk is inherited.
+ Trunk *bool `json:"trunk,omitempty"`
+
+ // The ID of the host where the port is allocated. Do not use this
+ // field: it cannot be used correctly.
+ // Deprecated: hostID is silently ignored. It will be removed with no replacement.
+ DeprecatedHostID string `json:"hostID,omitempty"`
+}
+
+type AddressPair struct {
+ IPAddress string `json:"ipAddress,omitempty"`
+ MACAddress string `json:"macAddress,omitempty"`
+}
+
+type FixedIPs struct {
+ // subnetID specifies the ID of the subnet where the fixed IP will be allocated.
+ SubnetID string `json:"subnetID"`
+ // ipAddress is a specific IP address to use in the given subnet. Port
+ // creation will fail if the address is not available. If not specified,
+ // an available IP from the given subnet will be selected automatically.
+ IPAddress string `json:"ipAddress,omitempty"`
+}
+
+type RootVolume struct {
+ // sourceUUID specifies the UUID of a glance image used to populate the root volume.
+ // Deprecated: set image in the platform spec instead. This will be
+ // ignored if image is set in the platform spec.
+ SourceUUID string `json:"sourceUUID,omitempty"`
+ // volumeType specifies a volume type to use when creating the root
+ // volume. If not specified the default volume type will be used.
+ VolumeType string `json:"volumeType,omitempty"`
+ // diskSize specifies the size, in GB, of the created root volume.
+ Size int `json:"diskSize,omitempty"`
+ // availabilityZone specifies the Cinder availability where the root volume will be created.
+ Zone string `json:"availabilityZone,omitempty"`
+
+ // Deprecated: sourceType will be silently ignored. There is no replacement.
+ DeprecatedSourceType string `json:"sourceType,omitempty"`
+ // Deprecated: deviceType will be silently ignored. There is no replacement.
+ DeprecatedDeviceType string `json:"deviceType,omitempty"`
+}
+
+// blockDeviceStorage is the storage type of a block device to create and
+// contains additional storage options.
+// +union
+type BlockDeviceStorage struct {
+ // type is the type of block device to create.
+ // This can be either "Volume" or "Local".
+ // +kubebuilder:validation:Required
+ // +unionDiscriminator
+ Type BlockDeviceType `json:"type"`
+
+ // volume contains additional storage options for a volume block device.
+ // +optional
+ // +unionMember,optional
+ Volume *BlockDeviceVolume `json:"volume,omitempty"`
+}
+
+// blockDeviceVolume contains additional storage options for a volume block device.
+type BlockDeviceVolume struct {
+ // type is the Cinder volume type of the volume.
+ // If omitted, the default Cinder volume type that is configured in the OpenStack cloud
+ // will be used.
+ // +optional
+ Type string `json:"type,omitempty"`
+
+ // availabilityZone is the volume availability zone to create the volume in.
+ // If omitted, the availability zone of the server will be used.
+ // The availability zone must NOT contain spaces otherwise it will lead to volume that belongs
+ // to this availability zone register failure, see kubernetes/cloud-provider-openstack#1379 for
+ // further information.
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+}
+
+// additionalBlockDevice is a block device to attach to the server.
+type AdditionalBlockDevice struct {
+ // name of the block device in the context of a machine.
+ // If the block device is a volume, the Cinder volume will be named
+ // as a combination of the machine name and this name.
+ // Also, this name will be used for tagging the block device.
+ // Information about the block device tag can be obtained from the OpenStack
+ // metadata API or the config drive.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // sizeGiB is the size of the block device in gibibytes (GiB).
+ // +kubebuilder:validation:Required
+ SizeGiB int `json:"sizeGiB"`
+
+ // storage specifies the storage type of the block device and
+ // additional storage options.
+ // +kubebuilder:validation:Required
+ Storage BlockDeviceStorage `json:"storage"`
+}
+
+// BlockDeviceType defines the type of block device to create.
+type BlockDeviceType string
+
+const (
+ // LocalBlockDevice is an ephemeral block device attached to the server.
+ LocalBlockDevice BlockDeviceType = "Local"
+
+ // VolumeBlockDevice is a volume block device attached to the server.
+ VolumeBlockDevice BlockDeviceType = "Volume"
+)
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..f61b35ab44
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,407 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdditionalBlockDevice) DeepCopyInto(out *AdditionalBlockDevice) {
+ *out = *in
+ in.Storage.DeepCopyInto(&out.Storage)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalBlockDevice.
+func (in *AdditionalBlockDevice) DeepCopy() *AdditionalBlockDevice {
+ if in == nil {
+ return nil
+ }
+ out := new(AdditionalBlockDevice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressPair) DeepCopyInto(out *AddressPair) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressPair.
+func (in *AddressPair) DeepCopy() *AddressPair {
+ if in == nil {
+ return nil
+ }
+ out := new(AddressPair)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BlockDeviceStorage) DeepCopyInto(out *BlockDeviceStorage) {
+ *out = *in
+ if in.Volume != nil {
+ in, out := &in.Volume, &out.Volume
+ *out = new(BlockDeviceVolume)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceStorage.
+func (in *BlockDeviceStorage) DeepCopy() *BlockDeviceStorage {
+ if in == nil {
+ return nil
+ }
+ out := new(BlockDeviceStorage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BlockDeviceVolume) DeepCopyInto(out *BlockDeviceVolume) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceVolume.
+func (in *BlockDeviceVolume) DeepCopy() *BlockDeviceVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(BlockDeviceVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filter) DeepCopyInto(out *Filter) {
+ *out = *in
+ if in.DeprecatedAdminStateUp != nil {
+ in, out := &in.DeprecatedAdminStateUp, &out.DeprecatedAdminStateUp
+ *out = new(bool)
+ **out = **in
+ }
+ if in.DeprecatedShared != nil {
+ in, out := &in.DeprecatedShared, &out.DeprecatedShared
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
+ if in == nil {
+ return nil
+ }
+ out := new(Filter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FixedIPs) DeepCopyInto(out *FixedIPs) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIPs.
+func (in *FixedIPs) DeepCopy() *FixedIPs {
+ if in == nil {
+ return nil
+ }
+ out := new(FixedIPs)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkParam) DeepCopyInto(out *NetworkParam) {
+ *out = *in
+ in.Filter.DeepCopyInto(&out.Filter)
+ if in.Subnets != nil {
+ in, out := &in.Subnets, &out.Subnets
+ *out = make([]SubnetParam, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PortTags != nil {
+ in, out := &in.PortTags, &out.PortTags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Profile != nil {
+ in, out := &in.Profile, &out.Profile
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.PortSecurity != nil {
+ in, out := &in.PortSecurity, &out.PortSecurity
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParam.
+func (in *NetworkParam) DeepCopy() *NetworkParam {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkParam)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.CloudsSecret != nil {
+ in, out := &in.CloudsSecret, &out.CloudsSecret
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Networks != nil {
+ in, out := &in.Networks, &out.Networks
+ *out = make([]NetworkParam, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]PortOpts, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]SecurityGroupParam, len(*in))
+ copy(*out, *in)
+ }
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServerMetadata != nil {
+ in, out := &in.ServerMetadata, &out.ServerMetadata
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ConfigDrive != nil {
+ in, out := &in.ConfigDrive, &out.ConfigDrive
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RootVolume != nil {
+ in, out := &in.RootVolume, &out.RootVolume
+ *out = new(RootVolume)
+ **out = **in
+ }
+ if in.AdditionalBlockDevices != nil {
+ in, out := &in.AdditionalBlockDevices, &out.AdditionalBlockDevices
+ *out = make([]AdditionalBlockDevice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackProviderSpec.
+func (in *OpenstackProviderSpec) DeepCopy() *OpenstackProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenstackProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenstackProviderSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortOpts) DeepCopyInto(out *PortOpts) {
+ *out = *in
+ if in.AdminStateUp != nil {
+ in, out := &in.AdminStateUp, &out.AdminStateUp
+ *out = new(bool)
+ **out = **in
+ }
+ if in.FixedIPs != nil {
+ in, out := &in.FixedIPs, &out.FixedIPs
+ *out = make([]FixedIPs, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = new([]string)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ }
+ if in.AllowedAddressPairs != nil {
+ in, out := &in.AllowedAddressPairs, &out.AllowedAddressPairs
+ *out = make([]AddressPair, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Profile != nil {
+ in, out := &in.Profile, &out.Profile
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.PortSecurity != nil {
+ in, out := &in.PortSecurity, &out.PortSecurity
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Trunk != nil {
+ in, out := &in.Trunk, &out.Trunk
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortOpts.
+func (in *PortOpts) DeepCopy() *PortOpts {
+ if in == nil {
+ return nil
+ }
+ out := new(PortOpts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootVolume) DeepCopyInto(out *RootVolume) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume.
+func (in *RootVolume) DeepCopy() *RootVolume {
+ if in == nil {
+ return nil
+ }
+ out := new(RootVolume)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupFilter) DeepCopyInto(out *SecurityGroupFilter) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupFilter.
+func (in *SecurityGroupFilter) DeepCopy() *SecurityGroupFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(SecurityGroupFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityGroupParam) DeepCopyInto(out *SecurityGroupParam) {
+ *out = *in
+ out.Filter = in.Filter
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupParam.
+func (in *SecurityGroupParam) DeepCopy() *SecurityGroupParam {
+ if in == nil {
+ return nil
+ }
+ out := new(SecurityGroupParam)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) {
+ *out = *in
+ if in.DeprecatedEnableDHCP != nil {
+ in, out := &in.DeprecatedEnableDHCP, &out.DeprecatedEnableDHCP
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter.
+func (in *SubnetFilter) DeepCopy() *SubnetFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(SubnetFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubnetParam) DeepCopyInto(out *SubnetParam) {
+ *out = *in
+ in.Filter.DeepCopyInto(&out.Filter)
+ if in.PortTags != nil {
+ in, out := &in.PortTags, &out.PortTags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PortSecurity != nil {
+ in, out := &in.PortSecurity, &out.PortSecurity
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam.
+func (in *SubnetParam) DeepCopy() *SubnetParam {
+ if in == nil {
+ return nil
+ }
+ out := new(SubnetParam)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..c8094eb269
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,228 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AdditionalBlockDevice = map[string]string{
+ "": "additionalBlockDevice is a block device to attach to the server.",
+ "name": "name of the block device in the context of a machine. If the block device is a volume, the Cinder volume will be named as a combination of the machine name and this name. Also, this name will be used for tagging the block device. Information about the block device tag can be obtained from the OpenStack metadata API or the config drive.",
+ "sizeGiB": "sizeGiB is the size of the block device in gibibytes (GiB).",
+ "storage": "storage specifies the storage type of the block device and additional storage options.",
+}
+
+func (AdditionalBlockDevice) SwaggerDoc() map[string]string {
+ return map_AdditionalBlockDevice
+}
+
+var map_BlockDeviceStorage = map[string]string{
+ "": "blockDeviceStorage is the storage type of a block device to create and contains additional storage options.",
+ "type": "type is the type of block device to create. This can be either \"Volume\" or \"Local\".",
+ "volume": "volume contains additional storage options for a volume block device.",
+}
+
+func (BlockDeviceStorage) SwaggerDoc() map[string]string {
+ return map_BlockDeviceStorage
+}
+
+var map_BlockDeviceVolume = map[string]string{
+ "": "blockDeviceVolume contains additional storage options for a volume block device.",
+ "type": "type is the Cinder volume type of the volume. If omitted, the default Cinder volume type that is configured in the OpenStack cloud will be used.",
+ "availabilityZone": "availabilityZone is the volume availability zone to create the volume in. If omitted, the availability zone of the server will be used. The availability zone must NOT contain spaces, otherwise registration of a volume that belongs to this availability zone will fail; see kubernetes/cloud-provider-openstack#1379 for further information.",
+}
+
+func (BlockDeviceVolume) SwaggerDoc() map[string]string {
+ return map_BlockDeviceVolume
+}
+
+var map_Filter = map[string]string{
+ "id": "Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set.",
+ "name": "name filters networks by name.",
+ "description": "description filters networks by description.",
+ "tenantId": "tenantId filters networks by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.",
+ "projectId": "projectId filters networks by project ID.",
+ "tags": "tags filters by networks containing all specified tags. Multiple tags are comma separated.",
+ "tagsAny": "tagsAny filters by networks containing any specified tags. Multiple tags are comma separated.",
+ "notTags": "notTags filters by networks which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.",
+ "notTagsAny": "notTagsAny filters by networks which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.",
+ "status": "Deprecated: status is silently ignored. It has no replacement.",
+ "adminStateUp": "Deprecated: adminStateUp is silently ignored. It has no replacement.",
+ "shared": "Deprecated: shared is silently ignored. It has no replacement.",
+ "marker": "Deprecated: marker is silently ignored. It has no replacement.",
+ "limit": "Deprecated: limit is silently ignored. It has no replacement.",
+ "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.",
+ "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.",
+}
+
+func (Filter) SwaggerDoc() map[string]string {
+ return map_Filter
+}
+
+var map_FixedIPs = map[string]string{
+ "subnetID": "subnetID specifies the ID of the subnet where the fixed IP will be allocated.",
+ "ipAddress": "ipAddress is a specific IP address to use in the given subnet. Port creation will fail if the address is not available. If not specified, an available IP from the given subnet will be selected automatically.",
+}
+
+func (FixedIPs) SwaggerDoc() map[string]string {
+ return map_FixedIPs
+}
+
+var map_NetworkParam = map[string]string{
+ "uuid": "The UUID of the network. Required if you omit the port attribute.",
+ "fixedIp": "A fixed IPv4 address for the NIC.",
+ "filter": "Filters for optional network query",
+ "subnets": "Subnet within a network to use",
+ "noAllowedAddressPairs": "NoAllowedAddressPairs disables creation of allowed address pairs for the network ports",
+ "portTags": "PortTags allows users to specify a list of tags to add to ports created in a given network",
+ "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.",
+ "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.",
+ "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack",
+}
+
+func (NetworkParam) SwaggerDoc() map[string]string {
+ return map_NetworkParam
+}
+
+var map_OpenstackProviderSpec = map[string]string{
+ "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "cloudsSecret": "The name of the secret containing the openstack credentials",
+ "cloudName": "The name of the cloud to use from the clouds secret",
+ "flavor": "The flavor reference for the flavor for your server instance.",
+ "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.",
+ "keyName": "The ssh key to inject in the instance",
+ "sshUserName": "The machine ssh username",
+ "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.",
+ "ports": "Create and assign additional ports to instances",
+ "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.",
+ "availabilityZone": "The availability zone from which to launch the server.",
+ "securityGroups": "The names of the security groups to assign to the instance",
+ "userDataSecret": "The name of the secret containing the user data (startup script in most cases)",
+ "trunk": "Whether the server instance is created on a trunk port or not.",
+ "tags": "Machine tags Requires Nova api 2.52 minimum!",
+ "serverMetadata": "Metadata mapping. Allows you to create a map of key value pairs to add to the server instance.",
+ "configDrive": "Config Drive support",
+ "rootVolume": "The volume metadata to boot from",
+ "additionalBlockDevices": "additionalBlockDevices is a list of specifications for additional block devices to attach to the server instance",
+ "serverGroupID": "The server group to assign the machine to.",
+ "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist. If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.",
+ "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from",
+}
+
+func (OpenstackProviderSpec) SwaggerDoc() map[string]string {
+ return map_OpenstackProviderSpec
+}
+
+var map_PortOpts = map[string]string{
+ "networkID": "networkID is the ID of the network the port will be created in. It is required.",
+ "nameSuffix": "If nameSuffix is specified the created port will be named -. If not specified the port will be named -.",
+ "description": "description specifies the description of the created port.",
+ "adminStateUp": "adminStateUp sets the administrative state of the created port to up (true), or down (false).",
+ "macAddress": "macAddress specifies the MAC address of the created port.",
+ "fixedIPs": "fixedIPs specifies a set of fixed IPs to assign to the port. They must all be valid for the port's network.",
+ "tenantID": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: use projectID instead. It will be ignored if projectID is set.",
+ "projectID": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended.",
+ "securityGroups": "securityGroups specifies a set of security group UUIDs to use instead of the machine's default security groups. The default security groups will be used if this is left empty or not specified.",
+ "allowedAddressPairs": "allowedAddressPairs specifies a set of allowed address pairs to add to the port.",
+ "tags": "tags specifies a set of tags to add to the port.",
+ "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.",
+ "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.",
+ "portSecurity": "enable or disable security on a given port incompatible with securityGroups and allowedAddressPairs",
+ "trunk": "Enables and disables trunk at port level. If not provided, openStackMachine.Spec.Trunk is inherited.",
+ "hostID": "The ID of the host where the port is allocated. Do not use this field: it cannot be used correctly. Deprecated: hostID is silently ignored. It will be removed with no replacement.",
+}
+
+func (PortOpts) SwaggerDoc() map[string]string {
+ return map_PortOpts
+}
+
+var map_RootVolume = map[string]string{
+ "sourceUUID": "sourceUUID specifies the UUID of a glance image used to populate the root volume. Deprecated: set image in the platform spec instead. This will be ignored if image is set in the platform spec.",
+ "volumeType": "volumeType specifies a volume type to use when creating the root volume. If not specified the default volume type will be used.",
+ "diskSize": "diskSize specifies the size, in GB, of the created root volume.",
+ "availabilityZone": "availabilityZone specifies the Cinder availability where the root volume will be created.",
+ "sourceType": "Deprecated: sourceType will be silently ignored. There is no replacement.",
+ "deviceType": "Deprecated: deviceType will be silently ignored. There is no replacement.",
+}
+
+func (RootVolume) SwaggerDoc() map[string]string {
+ return map_RootVolume
+}
+
+var map_SecurityGroupFilter = map[string]string{
+ "id": "id specifies the ID of a security group to use. If set, id will not be validated before use. An invalid id will result in failure to create a server with an appropriate error message.",
+ "name": "name filters security groups by name.",
+ "description": "description filters security groups by description.",
+ "tenantId": "tenantId filters security groups by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.",
+ "projectId": "projectId filters security groups by project ID.",
+ "tags": "tags filters by security groups containing all specified tags. Multiple tags are comma separated.",
+ "tagsAny": "tagsAny filters by security groups containing any specified tags. Multiple tags are comma separated.",
+ "notTags": "notTags filters by security groups which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.",
+ "notTagsAny": "notTagsAny filters by security groups which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.",
+ "limit": "Deprecated: limit is silently ignored. It has no replacement.",
+ "marker": "Deprecated: marker is silently ignored. It has no replacement.",
+ "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.",
+ "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.",
+}
+
+func (SecurityGroupFilter) SwaggerDoc() map[string]string {
+ return map_SecurityGroupFilter
+}
+
+var map_SecurityGroupParam = map[string]string{
+ "uuid": "Security Group UUID",
+ "name": "Security Group name",
+ "filter": "Filters used to query security groups in openstack",
+}
+
+func (SecurityGroupParam) SwaggerDoc() map[string]string {
+ return map_SecurityGroupParam
+}
+
+var map_SubnetFilter = map[string]string{
+ "id": "id is the uuid of a specific subnet to use. If specified, id will not be validated before use; an invalid id will cause server creation to fail with an appropriate error.",
+ "name": "name filters subnets by name.",
+ "description": "description filters subnets by description.",
+ "networkId": "Deprecated: networkId is silently ignored. Set uuid on the containing network definition instead.",
+ "tenantId": "tenantId filters subnets by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.",
+ "projectId": "projectId filters subnets by project ID.",
+ "ipVersion": "ipVersion filters subnets by IP version.",
+ "gateway_ip": "gateway_ip filters subnets by gateway IP.",
+ "cidr": "cidr filters subnets by CIDR.",
+ "ipv6AddressMode": "ipv6AddressMode filters subnets by IPv6 address mode.",
+ "ipv6RaMode": "ipv6RaMode filters subnets by IPv6 router advertisement mode.",
+ "subnetpoolId": "subnetpoolId filters subnets by subnet pool ID.",
+ "tags": "tags filters by subnets containing all specified tags. Multiple tags are comma separated.",
+ "tagsAny": "tagsAny filters by subnets containing any specified tags. Multiple tags are comma separated.",
+ "notTags": "notTags filters by subnets which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.",
+ "notTagsAny": "notTagsAny filters by subnets which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.",
+ "enableDhcp": "Deprecated: enableDhcp is silently ignored. It has no replacement.",
+ "limit": "Deprecated: limit is silently ignored. It has no replacement.",
+ "marker": "Deprecated: marker is silently ignored. It has no replacement.",
+ "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.",
+ "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.",
+}
+
+func (SubnetFilter) SwaggerDoc() map[string]string {
+ return map_SubnetFilter
+}
+
+var map_SubnetParam = map[string]string{
+ "uuid": "The UUID of the network. Required if you omit the port attribute.",
+ "filter": "Filters for optional network query",
+ "portTags": "PortTags are tags that are added to ports created on this subnet",
+ "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack",
+}
+
+func (SubnetParam) SwaggerDoc() map[string]string {
+ return map_SubnetParam
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/Makefile b/vendor/github.com/openshift/api/machine/v1beta1/Makefile
new file mode 100644
index 0000000000..fee9e68fcc
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="machine.openshift.io/v1beta1"
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/doc.go b/vendor/github.com/openshift/api/machine/v1beta1/doc.go
new file mode 100644
index 0000000000..e14fc64e32
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=machine.openshift.io
+package v1beta1
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/register.go b/vendor/github.com/openshift/api/machine/v1beta1/register.go
new file mode 100644
index 0000000000..a3678c0073
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/register.go
@@ -0,0 +1,44 @@
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "machine.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &Machine{},
+ &MachineList{},
+ &MachineSet{},
+ &MachineSetList{},
+ &MachineHealthCheck{},
+ &MachineHealthCheckList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go
new file mode 100644
index 0000000000..f3853579bd
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go
@@ -0,0 +1,311 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AWSMachineProviderConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // AMI is the reference to the AMI from which to create the machine instance.
+ AMI AWSResourceReference `json:"ami"`
+ // InstanceType is the type of instance to create. Example: m4.xlarge
+ InstanceType string `json:"instanceType"`
+ // Tags is the set of tags to add to apply to an instance, in addition to the ones
+ // added by default by the actuator. These tags are additive. The actuator will ensure
+ // these tags are present, but will not remove any other tags that may exist on the
+ // instance.
+ // +optional
+ Tags []TagSpecification `json:"tags,omitempty"`
+ // IAMInstanceProfile is a reference to an IAM role to assign to the instance
+ // +optional
+ IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"`
+ // UserDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance
+ // +optional
+ UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+ // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions
+ // provided by attached IAM role where the actuator is running.
+ // +optional
+ CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+ // KeyName is the name of the KeyPair to use for SSH
+ // +optional
+ KeyName *string `json:"keyName,omitempty"`
+ // DeviceIndex is the index of the device on the instance for the network interface attachment.
+ // Defaults to 0.
+ DeviceIndex int64 `json:"deviceIndex"`
+ // PublicIP specifies whether the instance should get a public IP. If not present,
+ // it should use the default of its subnet.
+ // +optional
+ PublicIP *bool `json:"publicIp,omitempty"`
+ // NetworkInterfaceType specifies the type of network interface to be used for the primary
+ // network interface.
+ // Valid values are "ENA", "EFA", and omitted, which means no opinion and the platform
+ // chooses a good default which may change over time.
+ // The current default value is "ENA".
+ // Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more
+ // about the AWS Elastic Fabric Adapter interface option.
+ // +kubebuilder:validation:Enum:="ENA";"EFA"
+ // +optional
+ NetworkInterfaceType AWSNetworkInterfaceType `json:"networkInterfaceType,omitempty"`
+ // SecurityGroups is an array of references to security groups that should be applied to the
+ // instance.
+ // +optional
+ SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"`
+ // Subnet is a reference to the subnet to use for this instance
+ Subnet AWSResourceReference `json:"subnet"`
+ // Placement specifies where to create the instance in AWS
+ Placement Placement `json:"placement"`
+ // LoadBalancers is the set of load balancers to which the new instance
+ // should be added once it is created.
+ // +optional
+ LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"`
+ // BlockDevices is the set of block device mapping associated to this instance,
+ // block device without a name will be used as a root device and only one device without a name is allowed
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
+ // +optional
+ BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"`
+ // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
+ // +optional
+ SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+ // MetadataServiceOptions allows users to configure instance metadata service interaction options.
+ // If nothing specified, default AWS IMDS settings will be applied.
+ // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html
+ // +optional
+ MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"`
+ // PlacementGroupName specifies the name of the placement group in which to launch the instance.
+ // The placement group must already be created and may use any placement strategy.
+ // When omitted, no placement group is used when creating the EC2 instance.
+ // +optional
+ PlacementGroupName string `json:"placementGroupName,omitempty"`
+}
+
+// BlockDeviceMappingSpec describes a block device mapping
+type BlockDeviceMappingSpec struct {
+ // The device name exposed to the machine (for example, /dev/sdh or xvdh).
+ // +optional
+ DeviceName *string `json:"deviceName,omitempty"`
+ // Parameters used to automatically set up EBS volumes when the machine is
+ // launched.
+ // +optional
+ EBS *EBSBlockDeviceSpec `json:"ebs,omitempty"`
+ // Suppresses the specified device included in the block device mapping of the
+ // AMI.
+ // +optional
+ NoDevice *string `json:"noDevice,omitempty"`
+ // The virtual device name (ephemeralN). Machine store volumes are numbered
+ // starting from 0. A machine type with 2 available machine store volumes
+ // can specify mappings for ephemeral0 and ephemeral1. The number of available
+ // machine store volumes depends on the machine type. After you connect to
+ // the machine, you must mount the volume.
+ //
+ // Constraints: For M3 machines, you must specify machine store volumes in
+ // the block device mapping for the machine. When you launch an M3 machine,
+ // we ignore any machine store volumes specified in the block device mapping
+ // for the AMI.
+ // +optional
+ VirtualName *string `json:"virtualName,omitempty"`
+}
+
+// EBSBlockDeviceSpec describes a block device for an EBS volume.
+// https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice
+type EBSBlockDeviceSpec struct {
+ // Indicates whether the EBS volume is deleted on machine termination.
+ // +optional
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
+ // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes
+ // may only be attached to machines that support Amazon EBS encryption.
+ // +optional
+ Encrypted *bool `json:"encrypted,omitempty"`
+ // Indicates the KMS key that should be used to encrypt the Amazon EBS volume.
+ // +optional
+ KMSKey AWSResourceReference `json:"kmsKey,omitempty"`
+ // The number of I/O operations per second (IOPS) that the volume supports.
+ // For io1, this represents the number of IOPS that are provisioned for the
+ // volume. For gp2, this represents the baseline performance of the volume and
+ // the rate at which the volume accumulates I/O credits for bursting. For more
+ // information about General Purpose SSD baseline performance, I/O credits,
+ // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
+ // in the Amazon Elastic Compute Cloud User Guide.
+ //
+ // Minimal and maximal IOPS for io1 and gp2 are constrained. Please, check
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
+ // for precise boundaries for individual volumes.
+ //
+ // Condition: This parameter is required for requests to create io1 volumes;
+ // it is not used in requests to create gp2, st1, sc1, or standard volumes.
+ // +optional
+ Iops *int64 `json:"iops,omitempty"`
+ // The size of the volume, in GiB.
+ //
+ // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned
+ // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for
+ // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify
+ // a snapshot, the volume size must be equal to or larger than the snapshot
+ // size.
+ //
+ // Default: If you're creating the volume from a snapshot and don't specify
+ // a volume size, the default is the snapshot size.
+ // +optional
+ VolumeSize *int64 `json:"volumeSize,omitempty"`
+ // The volume type: gp2, io1, st1, sc1, or standard.
+ // Default: standard
+ // +optional
+ VolumeType *string `json:"volumeType,omitempty"`
+}
+
+// SpotMarketOptions defines the options available to a user when configuring
+// Machines to run on Spot instances.
+// Most users should provide an empty struct.
+type SpotMarketOptions struct {
+ // The maximum price the user is willing to pay for their instances
+ // Default: On-Demand price
+ // +optional
+ MaxPrice *string `json:"maxPrice,omitempty"`
+}
+
+type MetadataServiceAuthentication string
+
+const (
+ // MetadataServiceAuthenticationRequired enforces sending of a signed token header with any instance metadata retrieval (GET) requests.
+ // Enforces IMDSv2 usage.
+ MetadataServiceAuthenticationRequired = "Required"
+ // MetadataServiceAuthenticationOptional allows IMDSv1 usage along with IMDSv2
+ MetadataServiceAuthenticationOptional = "Optional"
+)
+
+// MetadataServiceOptions defines the options available to a user when configuring
+// Instance Metadata Service (IMDS) Options.
+type MetadataServiceOptions struct {
+ // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service.
+ // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service.
+ // When omitted, this means the user has no opinion and the value is left to the platform to choose a good
+ // default, which is subject to change over time. The current default is optional.
+ // At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API
+ // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html
+ // +kubebuilder:validation:Enum=Required;Optional
+ // +optional
+ Authentication MetadataServiceAuthentication `json:"authentication,omitempty"`
+}
+
+// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
+// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+// a validation error.
+type AWSResourceReference struct {
+ // ID of resource
+ // +optional
+ ID *string `json:"id,omitempty"`
+ // ARN of resource
+ // +optional
+ ARN *string `json:"arn,omitempty"`
+ // Filters is a set of filters used to identify a resource
+ // +optional
+ Filters []Filter `json:"filters,omitempty"`
+}
+
+// Placement indicates where to create the instance in AWS
+type Placement struct {
+ // Region is the region to use to create the instance
+ // +optional
+ Region string `json:"region,omitempty"`
+ // AvailabilityZone is the availability zone of the instance
+ // +optional
+ AvailabilityZone string `json:"availabilityZone,omitempty"`
+ // Tenancy indicates if instance should run on shared or single-tenant hardware. There are
+ // supported 3 options: default, dedicated and host.
+ // +optional
+ Tenancy InstanceTenancy `json:"tenancy,omitempty"`
+}
+
+// Filter is a filter used to identify an AWS resource
+type Filter struct {
+ // Name of the filter. Filter names are case-sensitive.
+ Name string `json:"name"`
+ // Values includes one or more filter values. Filter values are case-sensitive.
+ // +optional
+ Values []string `json:"values,omitempty"`
+}
+
+// TagSpecification is the name/value pair for a tag
+type TagSpecification struct {
+ // Name of the tag
+ Name string `json:"name"`
+ // Value of the tag
+ Value string `json:"value"`
+}
+
+// AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type AWSMachineProviderConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []AWSMachineProviderConfig `json:"items"`
+}
+
+// LoadBalancerReference is a reference to a load balancer on AWS.
+type LoadBalancerReference struct {
+ Name string `json:"name"`
+ Type AWSLoadBalancerType `json:"type"`
+}
+
+// AWSLoadBalancerType is the type of LoadBalancer to use when registering
+// an instance with load balancers specified in LoadBalancerNames
+type AWSLoadBalancerType string
+
+// InstanceTenancy indicates if instance should run on shared or single-tenant hardware.
+type InstanceTenancy string
+
+const (
+ // DefaultTenancy instance runs on shared hardware
+ DefaultTenancy InstanceTenancy = "default"
+ // DedicatedTenancy instance runs on single-tenant hardware
+ DedicatedTenancy InstanceTenancy = "dedicated"
+ // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control.
+ HostTenancy InstanceTenancy = "host"
+)
+
+// Possible values for AWSLoadBalancerType. Add to this list as other types
+// of load balancer are supported by the actuator.
+const (
+ ClassicLoadBalancerType AWSLoadBalancerType = "classic" // AWS classic ELB
+ NetworkLoadBalancerType AWSLoadBalancerType = "network" // AWS Network Load Balancer (NLB)
+)
+
+// AWSNetworkInterfaceType defines the network interface type of the
+// AWS EC2 network interface.
+type AWSNetworkInterfaceType string
+
+const (
+ // AWSENANetworkInterfaceType is the default network interface type,
+ // the EC2 Elastic Network Adapter commonly used with EC2 instances.
+ // This should be used for standard network operations.
+ AWSENANetworkInterfaceType AWSNetworkInterfaceType = "ENA"
+ // AWSEFANetworkInterfaceType is the Elastic Fabric Adapter network interface type.
+ AWSEFANetworkInterfaceType AWSNetworkInterfaceType = "EFA"
+)
+
+// AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains AWS-specific status information.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type AWSMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+ // InstanceID is the instance ID of the machine created in AWS
+ // +optional
+ InstanceID *string `json:"instanceId,omitempty"`
+ // InstanceState is the state of the AWS instance for this machine
+ // +optional
+ InstanceState *string `json:"instanceState,omitempty"`
+ // Conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go
new file mode 100644
index 0000000000..00a9497fd3
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go
@@ -0,0 +1,575 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SecurityEncryptionTypes represents the Encryption Type when the Azure Virtual Machine is a
+// Confidential VM.
+type SecurityEncryptionTypes string
+
+const (
+ // SecurityEncryptionTypesVMGuestStateOnly disables OS disk confidential encryption.
+ SecurityEncryptionTypesVMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly"
+ // SecurityEncryptionTypesDiskWithVMGuestState enables OS disk confidential encryption with a
+ // platform-managed key (PMK) or a customer-managed key (CMK).
+ SecurityEncryptionTypesDiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState"
+)
+
+// SecurityTypes represents the SecurityType of the virtual machine.
+type SecurityTypes string
+
+const (
+ // SecurityTypesConfidentialVM defines the SecurityType of the virtual machine as a Confidential VM.
+ SecurityTypesConfidentialVM SecurityTypes = "ConfidentialVM"
+ // SecurityTypesTrustedLaunch defines the SecurityType of the virtual machine as a Trusted Launch VM.
+ SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch"
+)
+
+// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine.
+// Required parameters such as location that are not specified by this configuration, will be defaulted
+// by the actuator.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AzureMachineProviderSpec struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // UserDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance
+ // +optional
+ UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"`
+ // CredentialsSecret is a reference to the secret with Azure credentials.
+ // +optional
+ CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"`
+ // Location is the region to use to create the instance
+ // +optional
+ Location string `json:"location,omitempty"`
+ // VMSize is the size of the VM to create.
+ // +optional
+ VMSize string `json:"vmSize,omitempty"`
+ // Image is the OS image to use to create the instance.
+ Image Image `json:"image"`
+ // OSDisk represents the parameters for creating the OS disk.
+ OSDisk OSDisk `json:"osDisk"`
+ // DataDisk specifies the parameters that are used to add one or more data disks to the machine.
+ // +optional
+ DataDisks []DataDisk `json:"dataDisks,omitempty"`
+ // SSHPublicKey is the public key to use to SSH to the virtual machine.
+ // +optional
+ SSHPublicKey string `json:"sshPublicKey,omitempty"`
+ // PublicIP if true a public IP will be used
+ PublicIP bool `json:"publicIP"`
+ // Tags is a list of tags to apply to the machine.
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+ // Network Security Group that needs to be attached to the machine's interface.
+ // No security group will be attached if empty.
+ // +optional
+ SecurityGroup string `json:"securityGroup,omitempty"`
+ // Application Security Groups that need to be attached to the machine's interface.
+ // No application security groups will be attached if zero-length.
+ // +optional
+ ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"`
+ // Subnet to use for this instance
+ Subnet string `json:"subnet"`
+ // PublicLoadBalancer to use for this instance
+ // +optional
+ PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"`
+ // InternalLoadBalancerName to use for this instance
+ // +optional
+ InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"`
+ // NatRule to set inbound NAT rule of the load balancer
+ // +optional
+ NatRule *int64 `json:"natRule,omitempty"`
+ // ManagedIdentity to set managed identity name
+ // +optional
+ ManagedIdentity string `json:"managedIdentity,omitempty"`
+ // Vnet to set virtual network name
+ // +optional
+ Vnet string `json:"vnet,omitempty"`
+ // Availability Zone for the virtual machine.
+ // If nil, the virtual machine should be deployed to no zone
+ // +optional
+ Zone string `json:"zone,omitempty"`
+ // NetworkResourceGroup is the resource group for the virtual machine's network
+ // +optional
+ NetworkResourceGroup string `json:"networkResourceGroup,omitempty"`
+ // ResourceGroup is the resource group for the virtual machine
+ // +optional
+ ResourceGroup string `json:"resourceGroup,omitempty"`
+ // SpotVMOptions allows the ability to specify the Machine should use a Spot VM
+ // +optional
+ SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"`
+ // SecurityProfile specifies the Security profile settings for a virtual machine.
+ // +optional
+ SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
+ // UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine.
+ // This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes.
+ // This Azure feature is subject to a specific scope and certain limitations.
+ // More information on this can be found in the official Azure documentation for Ultra Disks:
+ // (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).
+ //
+ // When omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability.
+ // If a Persistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is omitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk).
+ // This may manifest in the Pod being stuck in `ContainerCreating` phase.
+ // This defaulting behaviour may be subject to change in future.
+ //
+ // When set to "Enabled", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine.
+ // This will thus allow UltraSSD both as Data Disks and Persistent Volumes.
+ // If set to "Enabled" when the capability can't be available due to scope and limitations, the Machine will go into "Failed" state.
+ //
+ // When set to "Disabled", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes.
+ // In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a "Failed" state.
+ // If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.
+ //
+ // +kubebuilder:validation:Enum:="Enabled";"Disabled"
+ // +optional
+ UltraSSDCapability AzureUltraSSDCapabilityState `json:"ultraSSDCapability,omitempty"`
+ // AcceleratedNetworking enables or disables Azure accelerated networking feature.
+ // Set to false by default. If true, then this will depend on whether the requested
+ // VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.
+ // +optional
+ AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"`
+ // AvailabilitySet specifies the availability set to use for this instance.
+ // Availability set should be precreated, before using this field.
+ // +optional
+ AvailabilitySet string `json:"availabilitySet,omitempty"`
+ // Diagnostics configures the diagnostics settings for the virtual machine.
+ // This allows you to configure boot diagnostics such as capturing serial output from
+ // the virtual machine on boot.
+ // This is useful for debugging software based launch issues.
+ // +optional
+ Diagnostics AzureDiagnostics `json:"diagnostics,omitempty"`
+ // capacityReservationGroupID specifies the capacity reservation group resource id that should be
+ // used for allocating the virtual machine.
+ // The field size should be greater than 0 and the field input must start with '/'.
+ // The input for capacityReservationGroupID must be similar to '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'.
+ // The keys which are used should be among 'subscriptions', 'providers' and 'resourcegroups' followed by valid ID or names respectively.
+ // +optional
+ CapacityReservationGroupID string `json:"capacityReservationGroupID,omitempty"`
+}
+
+// SpotVMOptions defines the options relevant to running the Machine on Spot VMs
+type SpotVMOptions struct {
+ // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances
+ // +optional
+ MaxPrice *resource.Quantity `json:"maxPrice,omitempty"`
+}
+
+// AzureDiagnostics is used to configure the diagnostic settings of the virtual machine.
+type AzureDiagnostics struct {
+ // AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine.
+ // This allows you to configure capturing serial output from the virtual machine on boot.
+ // This is useful for debugging software based launch issues.
+ // + This is a pointer so that we can validate required fields only when the structure is
+ // + configured by the user.
+ // +optional
+ Boot *AzureBootDiagnostics `json:"boot,omitempty"`
+}
+
+// AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine.
+// This allows you to configure capturing serial output from the virtual machine on boot.
+// This is useful for debugging software based launch issues.
+// +union
+type AzureBootDiagnostics struct {
+ // StorageAccountType determines if the storage account for storing the diagnostics data
+ // should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).
+ // +kubebuilder:validation:Required
+ // +unionDiscriminator
+ StorageAccountType AzureBootDiagnosticsStorageAccountType `json:"storageAccountType"`
+
+ // CustomerManaged provides reference to the customer managed storage account.
+ // +optional
+ CustomerManaged *AzureCustomerManagedBootDiagnostics `json:"customerManaged,omitempty"`
+}
+
+// AzureCustomerManagedBootDiagnostics provides reference to a customer managed
+// storage account.
+type AzureCustomerManagedBootDiagnostics struct {
+ // StorageAccountURI is the URI of the customer managed storage account.
+ // The URI typically will be `https://<storageAccountName>.blob.core.windows.net/`
+ // but may differ if you are using Azure DNS zone endpoints.
+ // You can find the correct endpoint by looking for the Blob Primary Endpoint in the
+ // endpoints tab in the Azure console.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^https://`
+ // +kubebuilder:validation:MaxLength=1024
+ StorageAccountURI string `json:"storageAccountURI"`
+}
+
+// AzureBootDiagnosticsStorageAccountType defines the list of valid storage account types
+// for the boot diagnostics.
+// +kubebuilder:validation:Enum:="AzureManaged";"CustomerManaged"
+type AzureBootDiagnosticsStorageAccountType string
+
+const (
+ // AzureManagedAzureDiagnosticsStorage is used to determine that the diagnostics storage account
+ // should be provisioned by Azure.
+ AzureManagedAzureDiagnosticsStorage AzureBootDiagnosticsStorageAccountType = "AzureManaged"
+
+ // CustomerManagedAzureDiagnosticsStorage is used to determine that the diagnostics storage account
+ // should be provisioned by the Customer.
+ CustomerManagedAzureDiagnosticsStorage AzureBootDiagnosticsStorageAccountType = "CustomerManaged"
+)
+
+// AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains Azure-specific status information.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type AzureMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // VMID is the ID of the virtual machine created in Azure.
+ // +optional
+ VMID *string `json:"vmId,omitempty"`
+ // VMState is the provisioning state of the Azure virtual machine.
+ // +optional
+ VMState *AzureVMState `json:"vmState,omitempty"`
+ // Conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// AzureVMState describes the state of an Azure virtual machine.
+type AzureVMState string
+
+const (
+ // ProvisioningState related values
+ // VMStateCreating ...
+ VMStateCreating = AzureVMState("Creating")
+ // VMStateDeleting ...
+ VMStateDeleting = AzureVMState("Deleting")
+ // VMStateFailed ...
+ VMStateFailed = AzureVMState("Failed")
+ // VMStateMigrating ...
+ VMStateMigrating = AzureVMState("Migrating")
+ // VMStateSucceeded ...
+ VMStateSucceeded = AzureVMState("Succeeded")
+ // VMStateUpdating ...
+ VMStateUpdating = AzureVMState("Updating")
+
+ // PowerState related values
+ // VMStateStarting ...
+ VMStateStarting = AzureVMState("Starting")
+ // VMStateRunning ...
+ VMStateRunning = AzureVMState("Running")
+ // VMStateStopping ...
+ VMStateStopping = AzureVMState("Stopping")
+ // VMStateStopped ...
+ VMStateStopped = AzureVMState("Stopped")
+ // VMStateDeallocating ...
+ VMStateDeallocating = AzureVMState("Deallocating")
+ // VMStateDeallocated ...
+ VMStateDeallocated = AzureVMState("Deallocated")
+ // VMStateUnknown ...
+ VMStateUnknown = AzureVMState("Unknown")
+)
+
+// Image is a mirror of azure sdk compute.ImageReference
+type Image struct {
+ // Publisher is the name of the organization that created the image
+ Publisher string `json:"publisher"`
+ // Offer specifies the name of a group of related images created by the publisher.
+ // For example, UbuntuServer, WindowsServer
+ Offer string `json:"offer"`
+ // SKU specifies an instance of an offer, such as a major release of a distribution.
+ // For example, 18.04-LTS, 2019-Datacenter
+ SKU string `json:"sku"`
+ // Version specifies the version of an image sku. The allowed formats
+ // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers.
+ // Specify 'latest' to use the latest version of an image available at deploy time.
+ // Even if you use 'latest', the VM image will not automatically update after deploy
+ // time even if a new version becomes available.
+ Version string `json:"version"`
+ // ResourceID specifies an image to use by ID
+ ResourceID string `json:"resourceID"`
+ // Type identifies the source of the image and related information, such as purchase plans.
+ // Valid values are "ID", "MarketplaceWithPlan", "MarketplaceNoPlan", and omitted, which
+ // means no opinion and the platform chooses a good default which may change over time.
+ // Currently that default is "MarketplaceNoPlan" if publisher data is supplied, or "ID" if not.
+ // For more information about purchase plans, see:
+ // https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information
+ // +optional
+ Type AzureImageType `json:"type,omitempty"`
+}
+
+// AzureImageType provides an enumeration for the valid image types.
+type AzureImageType string
+
+const (
+ // AzureImageTypeID specifies that the image should be referenced by its resource ID.
+ AzureImageTypeID AzureImageType = "ID"
+ // AzureImageTypeMarketplaceNoPlan are images available from the marketplace that do not require a purchase plan.
+ AzureImageTypeMarketplaceNoPlan AzureImageType = "MarketplaceNoPlan"
+ // AzureImageTypeMarketplaceWithPlan require a purchase plan. Upstream these images are referred to as "ThirdParty."
+ AzureImageTypeMarketplaceWithPlan AzureImageType = "MarketplaceWithPlan"
+)
+
+type OSDisk struct {
+ // OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows".
+ OSType string `json:"osType"`
+ // ManagedDisk specifies the Managed Disk parameters for the OS disk.
+ ManagedDisk OSDiskManagedDiskParameters `json:"managedDisk"`
+ // DiskSizeGB is the size in GB to assign to the data disk.
+ DiskSizeGB int32 `json:"diskSizeGB"`
+ // DiskSettings describe ephemeral disk settings for the os disk.
+ // +optional
+ DiskSettings DiskSettings `json:"diskSettings,omitempty"`
+ // CachingType specifies the caching requirements.
+ // Possible values include: 'None', 'ReadOnly', 'ReadWrite'.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over
+ // time. Currently the default is `None`.
+ // +optional
+ // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite
+ CachingType string `json:"cachingType,omitempty"`
+}
+
+// DataDisk specifies the parameters that are used to add one or more data disks to the machine.
+// A Data Disk is a managed disk that's attached to a virtual machine to store application data.
+// It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume.
+// It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.
+//
+// As the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable.
+// This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization.
+// At this stage the previously defined `lun` is to be used as the "device" key for referencing the raw disk device to be initialized.
+// Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`.
+// For further guidance and examples, please refer to the official OpenShift docs.
+type DataDisk struct {
+ // NameSuffix is the suffix to be appended to the machine name to generate the disk name.
+ // Each disk name will be in the format <machineName>_<nameSuffix>.
+ // NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens.
+ // The overall disk name must not exceed 80 chars in length.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$`
+ // +kubebuilder:validation:MaxLength:=78
+ // +kubebuilder:validation:Required
+ NameSuffix string `json:"nameSuffix"`
+ // DiskSizeGB is the size in GB to assign to the data disk.
+ // +kubebuilder:validation:Minimum=4
+ // +kubebuilder:validation:Required
+ DiskSizeGB int32 `json:"diskSizeGB"`
+ // ManagedDisk specifies the Managed Disk parameters for the data disk.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is a ManagedDisk with storageAccountType: "Premium_LRS" and diskEncryptionSet.id: "Default".
+ // +optional
+ ManagedDisk DataDiskManagedDiskParameters `json:"managedDisk,omitempty"`
+ // Lun Specifies the logical unit number of the data disk.
+ // This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
+ // This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount).
+ // The value must be between 0 and 63.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=63
+ // +kubebuilder:validation:Required
+ Lun int32 `json:"lun,omitempty"`
+ // CachingType specifies the caching requirements.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is CachingTypeNone.
+ // +optional
+ // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite
+ CachingType CachingTypeOption `json:"cachingType,omitempty"`
+ // DeletionPolicy specifies the data disk deletion policy upon Machine deletion.
+ // Possible values are "Delete","Detach".
+ // When "Delete" is used the data disk is deleted when the Machine is deleted.
+ // When "Detach" is used the data disk is detached from the Machine and retained when the Machine is deleted.
+ // +kubebuilder:validation:Enum=Delete;Detach
+ // +kubebuilder:validation:Required
+ DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"`
+}
+
+// DiskDeletionPolicyType defines the possible values for DeletionPolicy.
+type DiskDeletionPolicyType string
+
+// These are the valid DiskDeletionPolicyType values.
+const (
+ // DiskDeletionPolicyTypeDelete means the DiskDeletionPolicyType is "Delete".
+ DiskDeletionPolicyTypeDelete DiskDeletionPolicyType = "Delete"
+ // DiskDeletionPolicyTypeDetach means the DiskDeletionPolicyType is "Detach".
+ DiskDeletionPolicyTypeDetach DiskDeletionPolicyType = "Detach"
+)
+
+// CachingTypeOption defines the different values for a CachingType.
+type CachingTypeOption string
+
+// These are the valid CachingTypeOption values.
+const (
+ // CachingTypeReadOnly means the CachingType is "ReadOnly".
+ CachingTypeReadOnly CachingTypeOption = "ReadOnly"
+ // CachingTypeReadWrite means the CachingType is "ReadWrite".
+ CachingTypeReadWrite CachingTypeOption = "ReadWrite"
+ // CachingTypeNone means the CachingType is "None".
+ CachingTypeNone CachingTypeOption = "None"
+)
+
+// DiskSettings describe ephemeral disk settings for the os disk.
+type DiskSettings struct {
+ // EphemeralStorageLocation enables ephemeral OS when set to 'Local'.
+ // Possible values include: 'Local'.
+ // See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over
+ // time. Currently the default is that disks are saved to remote Azure storage.
+ // +optional
+ // +kubebuilder:validation:Enum=Local
+ EphemeralStorageLocation string `json:"ephemeralStorageLocation,omitempty"`
+}
+
+// OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.
+type OSDiskManagedDiskParameters struct {
+ // StorageAccountType is the storage account type to use.
+ // Possible values include "Standard_LRS", "Premium_LRS".
+ StorageAccountType string `json:"storageAccountType"`
+ // DiskEncryptionSet is the disk encryption set properties
+ // +optional
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+ // securityProfile specifies the security profile for the managed disk.
+ // +optional
+ SecurityProfile VMDiskSecurityProfile `json:"securityProfile,omitempty"`
+}
+
+// VMDiskSecurityProfile specifies the security profile settings for the managed disk.
+// It can be set only for Confidential VMs.
+type VMDiskSecurityProfile struct {
+ // diskEncryptionSet specifies the customer managed disk encryption set resource id for the
+ // managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and
+ // VMGuest blob.
+ // +optional
+ DiskEncryptionSet DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+ // securityEncryptionType specifies the encryption type of the managed disk.
+ // It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState
+ // blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only.
+ // When set to VMGuestStateOnly, the vTPM should be enabled.
+ // When set to DiskWithVMGuestState, both SecureBoot and vTPM should be enabled.
+ // If the above conditions are not fulfilled, the VM will not be created and the respective error
+ // will be returned.
+ // It can be set only for Confidential VMs. Confidential VMs are defined by their
+ // SecurityProfile.SecurityType being set to ConfidentialVM, the SecurityEncryptionType of their
+ // OS disk being set to one of the allowed values and by enabling the respective
+ // SecurityProfile.UEFISettings of the VM (i.e. vTPM and SecureBoot), depending on the selected
+ // SecurityEncryptionType.
+ // For further details on Azure Confidential VMs, please refer to the respective documentation:
+ // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview
+ // +kubebuilder:validation:Enum=VMGuestStateOnly;DiskWithVMGuestState
+ // +optional
+ SecurityEncryptionType SecurityEncryptionTypes `json:"securityEncryptionType,omitempty"`
+}
+
+// DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.
+type DataDiskManagedDiskParameters struct {
+ // StorageAccountType is the storage account type to use.
+ // Possible values include "Standard_LRS", "Premium_LRS" and "UltraSSD_LRS".
+ // +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;UltraSSD_LRS
+ StorageAccountType StorageAccountType `json:"storageAccountType"`
+ // DiskEncryptionSet is the disk encryption set properties.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is a DiskEncryptionSet with id: "Default".
+ // +optional
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+}
+
+// StorageAccountType defines the different storage types to use for a ManagedDisk.
+type StorageAccountType string
+
+// These are the valid StorageAccountType types.
+const (
+ // "StorageAccountStandardLRS" means the Standard_LRS storage type.
+ StorageAccountStandardLRS StorageAccountType = "Standard_LRS"
+ // "StorageAccountPremiumLRS" means the Premium_LRS storage type.
+ StorageAccountPremiumLRS StorageAccountType = "Premium_LRS"
+ // "StorageAccountUltraSSDLRS" means the UltraSSD_LRS storage type.
+ StorageAccountUltraSSDLRS StorageAccountType = "UltraSSD_LRS"
+)
+
+// DiskEncryptionSetParameters is the disk encryption set properties
+type DiskEncryptionSetParameters struct {
+ // ID is the disk encryption set ID
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is: "Default".
+ // +optional
+ ID string `json:"id,omitempty"`
+}
+
+// SecurityProfile specifies the Security profile settings for a
+// virtual machine or virtual machine scale set.
+type SecurityProfile struct {
+ // encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual
+ // machine or virtual machine scale set.
+ // This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState.
+ // Default is disabled.
+ // +optional
+ EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"`
+ // settings specify the security type and the UEFI settings of the virtual machine. This field can
+ // be set for Confidential VMs and Trusted Launch for VMs.
+ // +optional
+ Settings SecuritySettings `json:"settings,omitempty"`
+}
+
+// SecuritySettings define the security type and the UEFI settings of the virtual machine.
+// +union
+type SecuritySettings struct {
+ // securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to
+ // enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set.
+ // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch
+ // +kubebuilder:validation:Required
+ // +unionDiscriminator
+ SecurityType SecurityTypes `json:"securityType,omitempty"`
+ // confidentialVM specifies the security configuration of the virtual machine.
+ // For more information regarding Confidential VMs, please refer to:
+ // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview
+ // +optional
+ ConfidentialVM *ConfidentialVM `json:"confidentialVM,omitempty"`
+ // trustedLaunch specifies the security configuration of the virtual machine.
+ // For more information regarding TrustedLaunch for VMs, please refer to:
+ // https://learn.microsoft.com/azure/virtual-machines/trusted-launch
+ // +optional
+ TrustedLaunch *TrustedLaunch `json:"trustedLaunch,omitempty"`
+}
+
+// ConfidentialVM defines the UEFI settings for the virtual machine.
+type ConfidentialVM struct {
+ // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.
+ // +kubebuilder:validation:Required
+ UEFISettings UEFISettings `json:"uefiSettings,omitempty"`
+}
+
+// TrustedLaunch defines the UEFI settings for the virtual machine.
+type TrustedLaunch struct {
+ // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.
+ // +kubebuilder:validation:Required
+ UEFISettings UEFISettings `json:"uefiSettings,omitempty"`
+}
+
+// UEFISettings specifies the security settings like secure boot and vTPM used while creating the
+// virtual machine.
+type UEFISettings struct {
+ // secureBoot specifies whether secure boot should be enabled on the virtual machine.
+ // Secure Boot verifies the digital signature of all boot components and halts the boot process if
+ // signature verification fails.
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ // +optional
+ SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"`
+ // virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine.
+ // When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline.
+ // The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed.
+ // This is required to be enabled if SecurityEncryptionType is defined.
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ // +optional
+ VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"`
+}
+
+// AzureUltraSSDCapabilityState defines the different states of an UltraSSDCapability
+type AzureUltraSSDCapabilityState string
+
+// These are the valid AzureUltraSSDCapabilityState states.
+const (
+ // "AzureUltraSSDCapabilityEnabled" means the Azure UltraSSDCapability is Enabled
+ AzureUltraSSDCapabilityEnabled AzureUltraSSDCapabilityState = "Enabled"
+ // "AzureUltraSSDCapabilityDisabled" means the Azure UltraSSDCapability is Disabled
+ AzureUltraSSDCapabilityDisabled AzureUltraSSDCapabilityState = "Disabled"
+)
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go
new file mode 100644
index 0000000000..b5bb506192
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go
@@ -0,0 +1,327 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GCPHostMaintenanceType is a type representing acceptable values for OnHostMaintenance field in GCPMachineProviderSpec
+type GCPHostMaintenanceType string
+
+const (
+ // MigrateHostMaintenanceType [default] - causes Compute Engine to live migrate an instance when there is a maintenance event.
+ MigrateHostMaintenanceType GCPHostMaintenanceType = "Migrate"
+ // TerminateHostMaintenanceType - stops an instance instead of migrating it.
+ TerminateHostMaintenanceType GCPHostMaintenanceType = "Terminate"
+)
+
+ // GCPRestartPolicyType is a type representing acceptable values for RestartPolicy field in GCPMachineProviderSpec
+type GCPRestartPolicyType string
+
+const (
+ // Restart an instance if an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event.
+ RestartPolicyAlways GCPRestartPolicyType = "Always"
+ // Do not restart an instance if an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event.
+ RestartPolicyNever GCPRestartPolicyType = "Never"
+)
+
+// SecureBootPolicy represents the secure boot configuration for the GCP machine.
+type SecureBootPolicy string
+
+const (
+ // SecureBootPolicyEnabled enables the secure boot configuration for the GCP machine.
+ SecureBootPolicyEnabled SecureBootPolicy = "Enabled"
+ // SecureBootPolicyDisabled disables the secure boot configuration for the GCP machine.
+ SecureBootPolicyDisabled SecureBootPolicy = "Disabled"
+)
+
+// VirtualizedTrustedPlatformModulePolicy represents the virtualized trusted platform module configuration for the GCP machine.
+type VirtualizedTrustedPlatformModulePolicy string
+
+const (
+ // VirtualizedTrustedPlatformModulePolicyEnabled enables the virtualized trusted platform module configuration for the GCP machine.
+ VirtualizedTrustedPlatformModulePolicyEnabled VirtualizedTrustedPlatformModulePolicy = "Enabled"
+ // VirtualizedTrustedPlatformModulePolicyDisabled disables the virtualized trusted platform module configuration for the GCP machine.
+ VirtualizedTrustedPlatformModulePolicyDisabled VirtualizedTrustedPlatformModulePolicy = "Disabled"
+)
+
+// IntegrityMonitoringPolicy represents the integrity monitoring configuration for the GCP machine.
+type IntegrityMonitoringPolicy string
+
+const (
+ // IntegrityMonitoringPolicyEnabled enables integrity monitoring for the GCP machine.
+ IntegrityMonitoringPolicyEnabled IntegrityMonitoringPolicy = "Enabled"
+ // IntegrityMonitoringPolicyDisabled disables integrity monitoring for the GCP machine.
+ IntegrityMonitoringPolicyDisabled IntegrityMonitoringPolicy = "Disabled"
+)
+
+// ConfidentialComputePolicy represents the confidential compute configuration for the GCP machine.
+type ConfidentialComputePolicy string
+
+const (
+ // ConfidentialComputePolicyEnabled enables confidential compute for the GCP machine.
+ ConfidentialComputePolicyEnabled ConfidentialComputePolicy = "Enabled"
+ // ConfidentialComputePolicyDisabled disables confidential compute for the GCP machine.
+ ConfidentialComputePolicyDisabled ConfidentialComputePolicy = "Disabled"
+)
+
+// GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+ // for a GCP virtual machine. It is used by the GCP machine actuator to create a single Machine.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type GCPMachineProviderSpec struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // UserDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance
+ // +optional
+ UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+ // CredentialsSecret is a reference to the secret with GCP credentials.
+ // +optional
+ CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+ // CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs.
+ // This is required if you plan to use this instance to forward routes.
+ CanIPForward bool `json:"canIPForward"`
+ // DeletionProtection whether the resource should be protected against deletion.
+ DeletionProtection bool `json:"deletionProtection"`
+ // Disks is a list of disks to be attached to the VM.
+ // +optional
+ Disks []*GCPDisk `json:"disks,omitempty"`
+ // Labels list of labels to apply to the VM.
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+ // Metadata key/value pairs to apply to the VM.
+ // +optional
+ Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"`
+ // NetworkInterfaces is a list of network interfaces to be attached to the VM.
+ // +optional
+ NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"`
+ // ServiceAccounts is a list of GCP service accounts to be used by the VM.
+ ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"`
+ // Tags list of network tags to apply to the VM.
+ Tags []string `json:"tags,omitempty"`
+ // TargetPools are used for network TCP/UDP load balancing. A target pool references member instances,
+ // an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool
+ // +optional
+ TargetPools []string `json:"targetPools,omitempty"`
+ // MachineType is the machine type to use for the VM.
+ MachineType string `json:"machineType"`
+ // Region is the region in which the GCP machine provider will create the VM.
+ Region string `json:"region"`
+ // Zone is the zone in which the GCP machine provider will create the VM.
+ Zone string `json:"zone"`
+ // ProjectID is the project in which the GCP machine provider will create the VM.
+ // +optional
+ ProjectID string `json:"projectID,omitempty"`
+ // GPUs is a list of GPUs to be attached to the VM.
+ // +optional
+ GPUs []GCPGPUConfig `json:"gpus,omitempty"`
+ // Preemptible indicates if created instance is preemptible.
+ // +optional
+ Preemptible bool `json:"preemptible,omitempty"`
+ // OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot.
+ // This is required to be set to "Terminate" if you want to provision machine with attached GPUs.
+ // Otherwise, allowed values are "Migrate" and "Terminate".
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Migrate".
+ // +kubebuilder:validation:Enum=Migrate;Terminate;
+ // +optional
+ OnHostMaintenance GCPHostMaintenanceType `json:"onHostMaintenance,omitempty"`
+ // RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always").
+ // Cannot be "Always" with preemptible instances.
+ // Otherwise, allowed values are "Always" and "Never".
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Always".
+ // RestartPolicy represents AutomaticRestart in GCP compute api
+ // +kubebuilder:validation:Enum=Always;Never;
+ // +optional
+ RestartPolicy GCPRestartPolicyType `json:"restartPolicy,omitempty"`
+
+ // ShieldedInstanceConfig is the Shielded VM configuration for the VM
+ // +optional
+ ShieldedInstanceConfig GCPShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"`
+
+ // confidentialCompute Defines whether the instance should have confidential compute enabled.
+ // If enabled OnHostMaintenance is required to be set to "Terminate".
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ // +optional
+ ConfidentialCompute ConfidentialComputePolicy `json:"confidentialCompute,omitempty"`
+
+ // resourceManagerTags is an optional list of tags to apply to the GCP resources created for
+ // the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for
+ // information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.
+ // +kubebuilder:validation:MaxItems=50
+ // +listType=map
+ // +listMapKey=key
+ // +optional
+ ResourceManagerTags []ResourceManagerTag `json:"resourceManagerTags,omitempty"`
+}
+
+// ResourceManagerTag is a tag to apply to GCP resources created for the cluster.
+type ResourceManagerTag struct {
+ // parentID is the ID of the hierarchical resource where the tags are defined
+ // e.g. at the Organization or the Project level. To find the Organization or Project ID ref
+ // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id
+ // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects
+ // An OrganizationID can have a maximum of 32 characters and must consist of decimal numbers, and
+ // cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain
+ // lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=32
+ // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)`
+ ParentID string `json:"parentID"`
+
+ // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty.
+ // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
+ // alphanumeric characters, and the following special characters `._-`.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$`
+ Key string `json:"key"`
+
+ // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty.
+ // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase
+ // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$`
+ Value string `json:"value"`
+}
+
+// GCPDisk describes disks for GCP.
+type GCPDisk struct {
+ // AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).
+ AutoDelete bool `json:"autoDelete"`
+ // Boot indicates if this is a boot disk (default false).
+ Boot bool `json:"boot"`
+ // SizeGB is the size of the disk (in GB).
+ SizeGB int64 `json:"sizeGb"`
+ // Type is the type of the disk (eg: pd-standard).
+ Type string `json:"type"`
+ // Image is the source image to create this disk.
+ Image string `json:"image"`
+ // Labels list of labels to apply to the disk.
+ Labels map[string]string `json:"labels"`
+ // EncryptionKey is the customer-supplied encryption key of the disk.
+ // +optional
+ EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"`
+}
+
+// GCPMetadata describes metadata for GCP.
+type GCPMetadata struct {
+ // Key is the metadata key.
+ Key string `json:"key"`
+ // Value is the metadata value.
+ Value *string `json:"value"`
+}
+
+// GCPNetworkInterface describes network interfaces for GCP
+type GCPNetworkInterface struct {
+ // PublicIP indicates if true a public IP will be used
+ PublicIP bool `json:"publicIP,omitempty"`
+ // Network is the network name.
+ Network string `json:"network,omitempty"`
+ // ProjectID is the project in which the GCP machine provider will create the VM.
+ ProjectID string `json:"projectID,omitempty"`
+ // Subnetwork is the subnetwork name.
+ Subnetwork string `json:"subnetwork,omitempty"`
+}
+
+// GCPServiceAccount describes service accounts for GCP.
+type GCPServiceAccount struct {
+ // Email is the service account email.
+ Email string `json:"email"`
+ // Scopes list of scopes to be assigned to the service account.
+ Scopes []string `json:"scopes"`
+}
+
+// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.
+type GCPEncryptionKeyReference struct {
+ // KMSKey is a reference to the customer-managed KMS key to use for disk encryption.
+ // +optional
+ KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"`
+ // KMSKeyServiceAccount is the service account being used for the
+ // encryption request for the given KMS key. If absent, the Compute
+ // Engine default service account is used.
+ // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account
+ // for details on the default service account.
+ // +optional
+ KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"`
+}
+
+// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key
+type GCPKMSKeyReference struct {
+ // Name is the name of the customer managed encryption key to be used for the disk encryption.
+ Name string `json:"name"`
+ // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.
+ KeyRing string `json:"keyRing"`
+ // ProjectID is the ID of the Project in which the KMS Key Ring exists.
+ // Defaults to the VM ProjectID if not set.
+ // +optional
+ ProjectID string `json:"projectID,omitempty"`
+ // Location is the GCP location in which the Key Ring exists.
+ Location string `json:"location"`
+}
+
+// GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.
+type GCPGPUConfig struct {
+ // Count is the number of GPUs to be attached to an instance.
+ Count int32 `json:"count"`
+ // Type is the type of GPU to be attached to an instance.
+ // Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4
+ // +kubebuilder:validation:Pattern=`^nvidia-tesla-(k80|p100|v100|p4|t4)$`
+ Type string `json:"type"`
+}
+
+// GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains GCP-specific status information.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type GCPMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // InstanceID is the ID of the instance in GCP
+ // +optional
+ InstanceID *string `json:"instanceId,omitempty"`
+ // InstanceState is the provisioning state of the GCP Instance.
+ // +optional
+ InstanceState *string `json:"instanceState,omitempty"`
+ // Conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP.
+// Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.
+type GCPShieldedInstanceConfig struct {
+ // SecureBoot Defines whether the instance should have secure boot enabled.
+ // Secure Boot verifies the digital signature of all boot components, and halts the boot process if signature verification fails.
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ //+optional
+ SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"`
+
+ // VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline.
+ // The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed.
+ // This is required to be set to "Enabled" if IntegrityMonitoring is enabled.
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ // +optional
+ VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"`
+
+ // IntegrityMonitoring determines whether the instance should have integrity monitoring that verifies the runtime boot integrity.
+ // It compares the most recent boot measurements to the integrity policy baseline and returns
+ // a pair of pass/fail results depending on whether they match or not.
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.
+ // +kubebuilder:validation:Enum=Enabled;Disabled
+ // +optional
+ IntegrityMonitoring IntegrityMonitoringPolicy `json:"integrityMonitoring,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go
new file mode 100644
index 0000000000..a2752733df
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go
@@ -0,0 +1,395 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+const (
+ // MachineFinalizer is set on PrepareForCreate callback.
+ MachineFinalizer = "machine.machine.openshift.io"
+
+ // MachineClusterLabelName is the label set on machines linked to a cluster.
+ MachineClusterLabelName = "cluster.k8s.io/cluster-name"
+
+ // MachineClusterIDLabel is the label that a machine must have to identify the
+ // cluster to which it belongs.
+ MachineClusterIDLabel = "machine.openshift.io/cluster-api-cluster"
+
+ // IPClaimProtectionFinalizer is placed on an IPAddressClaim by the machine reconciler
+ // when an IPAddressClaim associated with a machine is created. This finalizer is removed
+ // from the IPAddressClaim when the associated machine is deleted.
+ IPClaimProtectionFinalizer = "machine.openshift.io/ip-claim-protection"
+)
+
+type MachineStatusError string
+
+const (
+ // Represents that the combination of configuration in the MachineSpec
+ // is not supported by this cluster. This is not a transient error, but
+ // indicates a state that must be fixed before progress can be made.
+ //
+ // Example: the ProviderSpec specifies an instance type that doesn't exist,
+ InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration"
+
+ // This indicates that the MachineSpec has been updated in a way that
+ // is not supported for reconciliation on this cluster. The spec may be
+ // completely valid from a configuration standpoint, but the controller
+ // does not support changing the real world state to match the new
+ // spec.
+ //
+ // Example: the responsible controller is not capable of changing the
+ // container runtime from docker to rkt.
+ UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange"
+
+ // This generally refers to exceeding one's quota in a cloud provider,
+ // or running out of physical machines in an on-premise environment.
+ InsufficientResourcesMachineError MachineStatusError = "InsufficientResources"
+
+ // There was an error while trying to create a Node to match this
+ // Machine. This may indicate a transient problem that will be fixed
+ // automatically with time, such as a service outage, or a terminal
+ // error during creation that doesn't match a more specific
+ // MachineStatusError value.
+ //
+ // Example: timeout trying to connect to GCE.
+ CreateMachineError MachineStatusError = "CreateError"
+
+ // There was an error while trying to update a Node that this
+ // Machine represents. This may indicate a transient problem that will be
+ // fixed automatically with time, such as a service outage,
+ //
+ // Example: error updating load balancers
+ UpdateMachineError MachineStatusError = "UpdateError"
+
+ // An error was encountered while trying to delete the Node that this
+ // Machine represents. This could be a transient or terminal error, but
+ // will only be observable if the provider's Machine controller has
+ // added a finalizer to the object to more gracefully handle deletions.
+ //
+ // Example: cannot resolve EC2 IP address.
+ DeleteMachineError MachineStatusError = "DeleteError"
+
+ // TemplateClonedFromGroupKindAnnotation is the infrastructure machine
+ // annotation that stores the group-kind of the infrastructure template resource
+ // that was cloned for the machine. This annotation is set only during cloning a
+ // template. Older/adopted machines will not have this annotation.
+ TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind"
+
+ // TemplateClonedFromNameAnnotation is the infrastructure machine annotation that
+ // stores the name of the infrastructure template resource
+ // that was cloned for the machine. This annotation is set only during cloning a
+ // template. Older/adopted machines will not have this annotation.
+ TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name"
+
+ // This error indicates that the machine did not join the cluster
+ // as a new node within the expected timeframe after instance
+ // creation at the provider succeeded
+ //
+ // Example use case: A controller that deletes Machines which do
+ // not result in a Node joining the cluster within a given timeout
+ // and that are managed by a MachineSet
+ JoinClusterTimeoutMachineError = "JoinClusterTimeoutError"
+
+ // IPAddressInvalidReason is set to indicate that the claimed IP address is not valid.
+ IPAddressInvalidReason MachineStatusError = "IPAddressInvalid"
+)
+
+type ClusterStatusError string
+
+const (
+ // InvalidConfigurationClusterError indicates that the cluster
+ // configuration is invalid.
+ InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration"
+
+ // UnsupportedChangeClusterError indicates that the cluster
+ // spec has been updated in an unsupported way. That cannot be
+ // reconciled.
+ UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange"
+
+ // CreateClusterError indicates that an error was encountered
+ // when trying to create the cluster.
+ CreateClusterError ClusterStatusError = "CreateError"
+
+ // UpdateClusterError indicates that an error was encountered
+ // when trying to update the cluster.
+ UpdateClusterError ClusterStatusError = "UpdateError"
+
+ // DeleteClusterError indicates that an error was encountered
+ // when trying to delete the cluster.
+ DeleteClusterError ClusterStatusError = "DeleteError"
+)
+
+type MachineSetStatusError string
+
+const (
+ // Represents that the combination of configuration in the MachineTemplateSpec
+ // is not supported by this cluster. This is not a transient error, but
+ // indicates a state that must be fixed before progress can be made.
+ //
+ // Example: the ProviderSpec specifies an instance type that doesn't exist.
+ InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration"
+)
+
+type MachineDeploymentStrategyType string
+
+const (
+ // Replace the old MachineSet by new one using rolling update
+ // i.e. gradually scale down the old MachineSet and scale up the new one.
+ RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate"
+)
+
+const (
+ // PhaseFailed indicates a state that will need to be fixed before progress can be made.
+ // Failed machines have encountered a terminal error and must be deleted.
+ // https://github.com/openshift/enhancements/blob/master/enhancements/machine-instance-lifecycle.md
+ // e.g. Instance does NOT exist but Machine has providerID/addresses.
+ // e.g. Cloud service returns a 4xx response.
+ PhaseFailed string = "Failed"
+
+ // PhaseProvisioning indicates the instance does NOT exist.
+ // The machine has NOT been given a providerID or addresses.
+ // Provisioning implies that the Machine API is in the process of creating the instance.
+ PhaseProvisioning string = "Provisioning"
+
+ // PhaseProvisioned indicates the instance exists.
+ // The machine has been given a providerID and addresses.
+ // The machine API successfully provisioned an instance which has not yet joined the cluster,
+ // as such, the machine has NOT yet been given a nodeRef.
+ PhaseProvisioned string = "Provisioned"
+
+ // PhaseRunning indicates the instance exists and the node has joined the cluster.
+ // The machine has been given a providerID, addresses, and a nodeRef.
+ PhaseRunning string = "Running"
+
+ // PhaseDeleting indicates the machine has a deletion timestamp and that the
+ // Machine API is now in the process of removing the machine from the cluster.
+ PhaseDeleting string = "Deleting"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Machine is the Schema for the machines API
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=machines,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/948
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=machine-api,operatorOrdering=01
+// +openshift:capability=MachineAPI
+// +kubebuilder:metadata:annotations="exclude.release.openshift.io/internal-openshift-hosted=true"
+// +kubebuilder:metadata:annotations="include.release.openshift.io/self-managed-high-availability=true"
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Phase of machine"
+// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/instance-type']",description="Type of instance"
+// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/region']",description="Region associated with machine"
+// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/zone']",description="Zone associated with machine"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age"
+// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1
+// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\\.openshift\\.io/instance-state']",description="State of instance",priority=1
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type Machine struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec MachineSpec `json:"spec,omitempty"`
+ Status MachineStatus `json:"status,omitempty"`
+}
+
+// MachineSpec defines the desired state of Machine
+type MachineSpec struct {
+ // ObjectMeta will autopopulate the Node created. Use this to
+ // indicate what labels, annotations, name prefix, etc., should be used
+ // when creating the Node.
+ // +optional
+ ObjectMeta `json:"metadata,omitempty"`
+
+ // LifecycleHooks allow users to pause operations on the machine at
+ // certain predefined points within the machine lifecycle.
+ // +optional
+ LifecycleHooks LifecycleHooks `json:"lifecycleHooks,omitempty"`
+
+ // The list of the taints to be applied to the corresponding Node in additive
+ // manner. This list will not overwrite any other taints added to the Node on
+ // an ongoing basis by other entities. These taints should be actively reconciled
+ // e.g. if you ask the machine controller to apply a taint and then manually remove
+ // the taint the machine controller will put it back) but not have the machine controller
+ // remove any taints
+ // +optional
+ Taints []corev1.Taint `json:"taints,omitempty"`
+
+ // ProviderSpec details Provider-specific configuration to use during node creation.
+ // +optional
+ ProviderSpec ProviderSpec `json:"providerSpec"`
+
+ // ProviderID is the identification ID of the machine provided by the provider.
+ // This field must match the provider ID as seen on the node object corresponding to this machine.
+ // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler
+ // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out
+ // machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a
+ // generic out-of-tree provider for autoscaler, this field is required by autoscaler to be
+ // able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
+ // and then a comparison is done to find out unregistered machines and are marked for delete.
+ // This field will be set by the actuators and consumed by higher level entities like autoscaler that will
+ // be interfacing with cluster-api as generic provider.
+ // +optional
+ ProviderID *string `json:"providerID,omitempty"`
+}
+
+// LifecycleHooks allow users to pause operations on the machine at
+// certain prefedined points within the machine lifecycle.
+type LifecycleHooks struct {
+ // PreDrain hooks prevent the machine from being drained.
+ // This also blocks further lifecycle events, such as termination.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ PreDrain []LifecycleHook `json:"preDrain,omitempty"`
+
+ // PreTerminate hooks prevent the machine from being terminated.
+ // PreTerminate hooks be actioned after the Machine has been drained.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ PreTerminate []LifecycleHook `json:"preTerminate,omitempty"`
+}
+
+// LifecycleHook represents a single instance of a lifecycle hook
+type LifecycleHook struct {
+ // Name defines a unique name for the lifcycle hook.
+ // The name should be unique and descriptive, ideally 1-3 words, in CamelCase or
+ // it may be namespaced, eg. foo.example.com/CamelCase.
+ // Names must be unique and should only be managed by a single entity.
+ // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+ // +kubebuilder:validation:MinLength:=3
+ // +kubebuilder:validation:MaxLength:=256
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // Owner defines the owner of the lifecycle hook.
+ // This should be descriptive enough so that users can identify
+ // who/what is responsible for blocking the lifecycle.
+ // This could be the name of a controller (e.g. clusteroperator/etcd)
+ // or an administrator managing the hook.
+ // +kubebuilder:validation:MinLength:=3
+ // +kubebuilder:validation:MaxLength:=512
+ // +kubebuilder:validation:Required
+ Owner string `json:"owner"`
+}
+
+// MachineStatus defines the observed state of Machine
+type MachineStatus struct {
+ // NodeRef will point to the corresponding Node if it exists.
+ // +optional
+ NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`
+
+ // LastUpdated identifies when this status was last observed.
+ // +optional
+ LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
+
+ // ErrorReason will be set in the event that there is a terminal problem
+ // reconciling the Machine and will contain a succinct value suitable
+ // for machine interpretation.
+ //
+ // This field should not be set for transitive errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the Machine's spec or the configuration of
+ // the controller, and that manual intervention is required. Examples
+ // of terminal errors would be invalid combinations of settings in the
+ // spec, values that are unsupported by the controller, or the
+ // responsible controller itself being critically misconfigured.
+ //
+ // Any transient errors that occur during the reconciliation of Machines
+ // can be added as events to the Machine object and/or logged in the
+ // controller's output.
+ // +optional
+ ErrorReason *MachineStatusError `json:"errorReason,omitempty"`
+
+ // ErrorMessage will be set in the event that there is a terminal problem
+ // reconciling the Machine and will contain a more verbose string suitable
+ // for logging and human consumption.
+ //
+ // This field should not be set for transitive errors that a controller
+ // faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the Machine's spec or the configuration of
+ // the controller, and that manual intervention is required. Examples
+ // of terminal errors would be invalid combinations of settings in the
+ // spec, values that are unsupported by the controller, or the
+ // responsible controller itself being critically misconfigured.
+ //
+ // Any transient errors that occur during the reconciliation of Machines
+ // can be added as events to the Machine object and/or logged in the
+ // controller's output.
+ // +optional
+ ErrorMessage *string `json:"errorMessage,omitempty"`
+
+ // ProviderStatus details a Provider-specific status.
+ // It is recommended that providers maintain their
+ // own versioned API types that should be
+ // serialized/deserialized from this field.
+ // +optional
+ // +kubebuilder:validation:XPreserveUnknownFields
+ ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"`
+
+ // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.
+ // +optional
+ Addresses []corev1.NodeAddress `json:"addresses,omitempty"`
+
+ // LastOperation describes the last-operation performed by the machine-controller.
+ // This API should be useful as a history in terms of the latest operation performed on the
+ // specific machine. It should also convey the state of the latest-operation for example if
+ // it is still on-going, failed or completed successfully.
+ // +optional
+ LastOperation *LastOperation `json:"lastOperation,omitempty"`
+
+ // Phase represents the current phase of machine actuation.
+ // One of: Failed, Provisioning, Provisioned, Running, Deleting
+ // +optional
+ Phase *string `json:"phase,omitempty"`
+
+ // Conditions defines the current state of the Machine
+ Conditions Conditions `json:"conditions,omitempty"`
+}
+
+// LastOperation represents the detail of the last performed operation on the MachineObject.
+type LastOperation struct {
+ // Description is the human-readable description of the last operation.
+ Description *string `json:"description,omitempty"`
+
+ // LastUpdated is the timestamp at which LastOperation API was last-updated.
+ LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
+
+ // State is the current status of the last performed operation.
+ // E.g. Processing, Failed, Successful etc
+ State *string `json:"state,omitempty"`
+
+ // Type is the type of operation which was last performed.
+ // E.g. Create, Delete, Update etc
+ Type *string `json:"type,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineList contains a list of Machine
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []Machine `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go
new file mode 100644
index 0000000000..9963690f8f
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go
@@ -0,0 +1,148 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// RemediationStrategyType contains remediation strategy type
+type RemediationStrategyType string
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineHealthCheck is the Schema for the machinehealthchecks API
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=machinehealthchecks,scope=Namespaced,shortName=mhc;mhcs
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1032
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=machine-api,operatorOrdering=01
+// +openshift:capability=MachineAPI
+// +kubebuilder:metadata:annotations="exclude.release.openshift.io/internal-openshift-hosted=true"
+// +kubebuilder:metadata:annotations="include.release.openshift.io/self-managed-high-availability=true"
+// +k8s:openapi-gen=true
+// +kubebuilder:printcolumn:name="MaxUnhealthy",type="string",JSONPath=".spec.maxUnhealthy",description="Maximum number of unhealthy machines allowed"
+// +kubebuilder:printcolumn:name="ExpectedMachines",type="integer",JSONPath=".status.expectedMachines",description="Number of machines currently monitored"
+// +kubebuilder:printcolumn:name="CurrentHealthy",type="integer",JSONPath=".status.currentHealthy",description="Current observed healthy machines"
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineHealthCheck struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of machine health check policy
+ // +optional
+ Spec MachineHealthCheckSpec `json:"spec,omitempty"`
+
+ // Most recently observed status of MachineHealthCheck resource
+ // +optional
+ Status MachineHealthCheckStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineHealthCheckList contains a list of MachineHealthCheck
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineHealthCheckList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []MachineHealthCheck `json:"items"`
+}
+
+// MachineHealthCheckSpec defines the desired state of MachineHealthCheck
+type MachineHealthCheckSpec struct {
+ // Label selector to match machines whose health will be exercised.
+ // Note: An empty selector will match all machines.
+ Selector metav1.LabelSelector `json:"selector"`
+
+ // UnhealthyConditions contains a list of the conditions that determine
+ // whether a node is considered unhealthy. The conditions are combined in a
+ // logical OR, i.e. if any of the conditions is met, the node is unhealthy.
+ //
+ // +kubebuilder:validation:MinItems=1
+ UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions"`
+
+ // Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by
+ // "selector" are not healthy.
+ // Expects either a postive integer value or a percentage value.
+ // Percentage values must be positive whole numbers and are capped at 100%.
+ // Both 0 and 0% are valid and will block all remediation.
+ // +kubebuilder:default:="100%"
+ // +kubebuilder:validation:XIntOrString
+ // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$"
+ // +optional
+ MaxUnhealthy *intstr.IntOrString `json:"maxUnhealthy,omitempty"`
+
+ // Machines older than this duration without a node will be considered to have
+ // failed and will be remediated.
+ // To prevent Machines without Nodes from being removed, disable startup checks
+ // by setting this value explicitly to "0".
+ // Expects an unsigned duration string of decimal numbers each with optional
+ // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m".
+ // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ // +optional
+ // +kubebuilder:default:="10m"
+ // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"`
+
+ // RemediationTemplate is a reference to a remediation template
+ // provided by an infrastructure provider.
+ //
+ // This field is completely optional, when filled, the MachineHealthCheck controller
+ // creates a new object from the template referenced and hands off remediation of the machine to
+ // a controller that lives outside of Machine API Operator.
+ // +optional
+ RemediationTemplate *corev1.ObjectReference `json:"remediationTemplate,omitempty"`
+}
+
+// UnhealthyCondition represents a Node condition type and value with a timeout
+// specified as a duration. When the named condition has been in the given
+// status for at least the timeout value, a node is considered unhealthy.
+type UnhealthyCondition struct {
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:MinLength=1
+ Type corev1.NodeConditionType `json:"type"`
+
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:MinLength=1
+ Status corev1.ConditionStatus `json:"status"`
+
+ // Expects an unsigned duration string of decimal numbers each with optional
+ // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m".
+ // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+ // +kubebuilder:validation:Type:=string
+ Timeout metav1.Duration `json:"timeout"`
+}
+
+// MachineHealthCheckStatus defines the observed state of MachineHealthCheck
+type MachineHealthCheckStatus struct {
+ // total number of machines counted by this machine health check
+ // +kubebuilder:validation:Minimum=0
+ ExpectedMachines *int `json:"expectedMachines"`
+
+ // total number of machines counted by this machine health check
+ // +kubebuilder:validation:Minimum=0
+ CurrentHealthy *int `json:"currentHealthy"`
+
+ // RemediationsAllowed is the number of further remediations allowed by this machine health check before
+ // maxUnhealthy short circuiting will be applied
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ RemediationsAllowed int32 `json:"remediationsAllowed"`
+
+ // Conditions defines the current state of the MachineHealthCheck
+ // +optional
+ Conditions Conditions `json:"conditions,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go
new file mode 100644
index 0000000000..aadb519b7d
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go
@@ -0,0 +1,152 @@
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineSet ensures that a specified number of machines replicas are running at any given time.
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=machinesets,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1032
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=machine-api,operatorOrdering=01
+// +openshift:capability=MachineAPI
+// +kubebuilder:metadata:annotations="exclude.release.openshift.io/internal-openshift-hosted=true"
+// +kubebuilder:metadata:annotations="include.release.openshift.io/self-managed-high-availability=true"
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas"
+// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age"
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineSet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec MachineSetSpec `json:"spec,omitempty"`
+ Status MachineSetStatus `json:"status,omitempty"`
+}
+
+// MachineSetSpec defines the desired state of MachineSet
+type MachineSetSpec struct {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // +kubebuilder:default=1
+ Replicas *int32 `json:"replicas,omitempty"`
+ // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.
+ // Defaults to 0 (machine will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+ // DeletePolicy defines the policy used to identify nodes to delete when downscaling.
+ // Defaults to "Random". Valid values are "Random, "Newest", "Oldest"
+ // +kubebuilder:validation:Enum=Random;Newest;Oldest
+ DeletePolicy string `json:"deletePolicy,omitempty"`
+ // Selector is a label query over machines that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this MachineSet.
+ // It must match the machine template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector metav1.LabelSelector `json:"selector"`
+ // Template is the object that describes the machine that will be created if
+ // insufficient replicas are detected.
+ // +optional
+ Template MachineTemplateSpec `json:"template,omitempty"`
+}
+
+// MachineSetDeletePolicy defines how priority is assigned to nodes to delete when
+// downscaling a MachineSet. Defaults to "Random".
+type MachineSetDeletePolicy string
+
+const (
+ // RandomMachineSetDeletePolicy prioritizes both Machines that have the annotation
+ // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+ // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+ // Finally, it picks Machines at random to delete.
+ RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random"
+ // NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+ // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+ // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+ // It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp.
+ NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest"
+ // OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+ // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+ // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+ // It then prioritizes the oldest Machines for deletion based on the Machine's CreationTimestamp.
+ OldestMachineSetDeletePolicy MachineSetDeletePolicy = "Oldest"
+)
+
+// MachineTemplateSpec describes the data needed to create a Machine from a template
+type MachineTemplateSpec struct {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ ObjectMeta `json:"metadata,omitempty"`
+ // Specification of the desired behavior of the machine.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec MachineSpec `json:"spec,omitempty"`
+}
+
+// MachineSetStatus defines the observed state of MachineSet
+type MachineSetStatus struct {
+ // Replicas is the most recently observed number of replicas.
+ Replicas int32 `json:"replicas"`
+ // The number of replicas that have labels matching the labels of the machine template of the MachineSet.
+ // +optional
+ FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+ // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready".
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+ // The number of available replicas (ready for at least minReadySeconds) for this MachineSet.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty"`
+ // ObservedGeneration reflects the generation of the most recently observed MachineSet.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+ // In the event that there is a terminal problem reconciling the
+ // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
+ // will be populated with a succinct value suitable for machine
+ // interpretation, while ErrorMessage will contain a more verbose
+ // string suitable for logging and human consumption.
+ //
+ // These fields should not be set for transitive errors that a
+ // controller faces that are expected to be fixed automatically over
+ // time (like service outages), but instead indicate that something is
+ // fundamentally wrong with the MachineTemplate's spec or the configuration of
+ // the machine controller, and that manual intervention is required. Examples
+ // of terminal errors would be invalid combinations of settings in the
+ // spec, values that are unsupported by the machine controller, or the
+ // responsible machine controller itself being critically misconfigured.
+ //
+ // Any transient errors that occur during the reconciliation of Machines
+ // can be added as events to the MachineSet object and/or logged in the
+ // controller's output.
+ // +optional
+ ErrorReason *MachineSetStatusError `json:"errorReason,omitempty"`
+ // +optional
+ ErrorMessage *string `json:"errorMessage,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineSetList contains a list of MachineSet
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineSetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []MachineSet `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go
new file mode 100644
index 0000000000..a8f5b66a6d
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go
@@ -0,0 +1,229 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ProviderSpec defines the configuration to use during node creation.
+type ProviderSpec struct {
+
+ // No more than one of the following may be specified.
+
+ // Value is an inlined, serialized representation of the resource
+ // configuration. It is recommended that providers maintain their own
+ // versioned API types that should be serialized/deserialized from this
+ // field, akin to component config.
+ // +optional
+ // +kubebuilder:validation:XPreserveUnknownFields
+ Value *runtime.RawExtension `json:"value,omitempty"`
+}
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create. This is a copy of customizable fields from metav1.ObjectMeta.
+//
+// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`,
+// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases
+// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies
+// the API and some issues that can impact user experience.
+//
+// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054)
+// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs,
+// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`.
+// The investigation showed that `controller-tools@v2` behaves differently than its previous version
+// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.
+//
+// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta`
+// had validation properties, including for `creationTimestamp` (metav1.Time).
+// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null`
+// which breaks validation because the field isn't marked as nullable.
+//
+// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded
+// types. When that happens, this hack should be revisited.
+type ObjectMeta struct {
+ // Name must be unique within a namespace. Is required when creating resources, although
+ // some resources may allow a client to request the generation of an appropriate name
+ // automatically. Name is primarily intended for creation idempotence and configuration
+ // definition.
+ // Cannot be updated.
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // GenerateName is an optional prefix, used by the server, to generate a unique
+ // name ONLY IF the Name field has not been provided.
+ // If this field is used, the name returned to the client will be different
+ // than the name passed. This value will also be combined with a unique suffix.
+ // The provided value has the same validation rules as the Name field,
+ // and may be truncated by the length of the suffix required to make the value
+ // unique on the server.
+ //
+ // If this field is specified and the generated name exists, the server will
+ // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+ // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+ // should retry (optionally after the time indicated in the Retry-After header).
+ //
+ // Applied only if Name is not specified.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+ // +optional
+ GenerateName string `json:"generateName,omitempty"`
+
+ // Namespace defines the space within which each name must be unique. An empty namespace is
+ // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for
+ // those objects will be empty.
+ //
+ // Must be a DNS_LABEL.
+ // Cannot be updated.
+ // More info: http://kubernetes.io/docs/user-guide/namespaces
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: http://kubernetes.io/docs/user-guide/labels
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://kubernetes.io/docs/user-guide/annotations
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ // +optional
+ // +patchMergeKey=uid
+ // +patchStrategy=merge
+ OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"`
+}
+
+// ConditionSeverity expresses the severity of a Condition Type failing.
+type ConditionSeverity string
+
+const (
+ // ConditionSeverityError specifies that a condition with `Status=False` is an error.
+ ConditionSeverityError ConditionSeverity = "Error"
+
+ // ConditionSeverityWarning specifies that a condition with `Status=False` is a warning.
+ ConditionSeverityWarning ConditionSeverity = "Warning"
+
+ // ConditionSeverityInfo specifies that a condition with `Status=False` is informative.
+ ConditionSeverityInfo ConditionSeverity = "Info"
+
+ // ConditionSeverityNone should apply only to conditions with `Status=True`.
+ ConditionSeverityNone ConditionSeverity = ""
+)
+
+// ConditionType is a valid value for Condition.Type.
+type ConditionType string
+
+// Valid conditions for a machine.
+const (
+ // MachineCreated indicates whether the machine has been created or not. If not,
+ // it should include a reason and message for the failure.
+ // NOTE: MachineCreation is here for historical reasons, MachineCreated should be used instead
+ MachineCreation ConditionType = "MachineCreation"
+ // MachineCreated indicates whether the machine has been created or not. If not,
+ // it should include a reason and message for the failure.
+ MachineCreated ConditionType = "MachineCreated"
+ // InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider.
+ InstanceExistsCondition ConditionType = "InstanceExists"
+ // RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is
+ // allowed to remediate any Machines or whether it is blocked from remediating any further.
+ RemediationAllowedCondition ConditionType = "RemediationAllowed"
+ // ExternalRemediationTemplateAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation.
+ // ExternalRemediationTemplateAvailable is set to false if external remediation template is not found.
+ ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable"
+ // ExternalRemediationRequestAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation.
+ // ExternalRemediationRequestAvailable is set to false if creating external remediation request fails.
+ ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable"
+ // MachineDrained is set on a machine to indicate that the machine has been drained. When an error occurs during
+ // the drain process, the condition will be added with a false status and details of the error.
+ MachineDrained ConditionType = "Drained"
+ // MachineDrainable is set on a machine to indicate whether or not the machine can be drained, or, whether some
+ // deletion hook is blocking the drain operation.
+ MachineDrainable ConditionType = "Drainable"
+ // MachineTerminable is set on a machine to indicate whether or not the machine can be terminated, or, whether some
+ // deletion hook is blocking the termination operation.
+ MachineTerminable ConditionType = "Terminable"
+ // IPAddressClaimedCondition is set to indicate that a machine has claimed an IP address.
+ IPAddressClaimedCondition ConditionType = "IPAddressClaimed"
+)
+
+const (
+ // MachineCreationSucceeded indicates machine creation success.
+ MachineCreationSucceededConditionReason string = "MachineCreationSucceeded"
+ // MachineCreationFailed indicates machine creation failure.
+ MachineCreationFailedConditionReason string = "MachineCreationFailed"
+ // ErrorCheckingProviderReason is the reason used when the exist operation fails.
+ // This would normally be because we cannot contact the provider.
+ ErrorCheckingProviderReason = "ErrorCheckingProvider"
+ // InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing.
+ InstanceMissingReason = "InstanceMissing"
+ // InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned.
+ InstanceNotCreatedReason = "InstanceNotCreated"
+ // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked
+ // from making any further remediations.
+ TooManyUnhealthyReason = "TooManyUnhealthy"
+ // ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find external remediation template.
+ ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound"
+ // ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create external remediation request.
+ ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed"
+ // MachineHookPresent indicates that a machine lifecycle hook is blocking part of the lifecycle of the machine.
+ // This should be used with the `Drainable` and `Terminable` machine condition types.
+ MachineHookPresent = "HookPresent"
+ // MachineDrainError indicates an error occurred when draining the machine.
+ // This should be used with the `Drained` condition type.
+ MachineDrainError = "DrainError"
+ // WaitingForIPAddressReason is set to indicate that a machine is
+ // currently waiting for an IP address to be provisioned.
+ WaitingForIPAddressReason string = "WaitingForIPAddress"
+ // IPAddressClaimedReason is set to indicate the machine was able to claim an IP address during provisioning.
+ IPAddressClaimedReason string = "IPAddressesClaimed"
+)
+
+// Condition defines an observation of a Machine API resource operational state.
+type Condition struct {
+ // Type of condition in CamelCase or in foo.example.com/CamelCase.
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+ // can be useful (see .node.status.conditions), the ability to deconflict is important.
+ // +required
+ Type ConditionType `json:"type"`
+
+ // Status of the condition, one of True, False, Unknown.
+ // +required
+ Status corev1.ConditionStatus `json:"status"`
+
+ // Severity provides an explicit classification of Reason code, so the users or machines can immediately
+ // understand the current situation and act accordingly.
+ // The Severity field MUST be set only when Status=False.
+ // +optional
+ Severity ConditionSeverity `json:"severity,omitempty"`
+
+ // Last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when
+ // the API field changed is acceptable.
+ // +required
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+
+ // The reason for the condition's last transition in CamelCase.
+ // The specific API may choose whether or not this field is considered a guaranteed API.
+ // This field may not be empty.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // A human readable message indicating details about the transition.
+ // This field may be empty.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// Conditions provide observations of the operational state of a Machine API resource.
+type Conditions []Condition
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go
new file mode 100644
index 0000000000..b726931982
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go
@@ -0,0 +1,216 @@
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+ // for a VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VSphereMachineProviderSpec struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // UserDataSecret contains a local reference to a secret that contains the
+ // UserData to apply to the instance
+ // +optional
+ UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+ // CredentialsSecret is a reference to the secret with vSphere credentials.
+ // +optional
+ CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+ // Template is the name, inventory path, or instance UUID of the template
+ // used to clone new machines.
+ Template string `json:"template"`
+ // Workspace describes the workspace to use for the machine.
+ // +optional
+ Workspace *Workspace `json:"workspace,omitempty"`
+ // Network is the network configuration for this machine's VM.
+ Network NetworkSpec `json:"network"`
+ // NumCPUs is the number of virtual processors in a virtual machine.
+ // Defaults to the analogue property value in the template from which this
+ // machine is cloned.
+ // +optional
+ NumCPUs int32 `json:"numCPUs,omitempty"`
+ // NumCoresPerSocket is the number of cores among which to distribute CPUs in this
+ // virtual machine.
+ // Defaults to the analogue property value in the template from which this
+ // machine is cloned.
+ // +optional
+ NumCoresPerSocket int32 `json:"numCoresPerSocket,omitempty"`
+ // MemoryMiB is the size of a virtual machine's memory, in MiB.
+ // Defaults to the analogue property value in the template from which this
+ // machine is cloned.
+ // +optional
+ MemoryMiB int64 `json:"memoryMiB,omitempty"`
+ // DiskGiB is the size of a virtual machine's disk, in GiB.
+ // Defaults to the analogue property value in the template from which this
+ // machine is cloned.
+ // This parameter will be ignored if 'LinkedClone' CloneMode is set.
+ // +optional
+ DiskGiB int32 `json:"diskGiB,omitempty"`
+ // tagIDs is an optional set of tags to add to an instance. Specified tagIDs
+ // must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified.
+ // +kubebuilder:validation:Pattern:="^(urn):(vmomi):(InventoryServiceTag):([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}):([^:]+)$"
+ // +kubebuilder:example=urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL
+ // +optional
+ TagIDs []string `json:"tagIDs,omitempty"`
+ // Snapshot is the name of the snapshot from which the VM was cloned
+ // +optional
+ Snapshot string `json:"snapshot"`
+ // CloneMode specifies the type of clone operation.
+ // The LinkedClone mode is only supported for templates that have at least
+ // one snapshot. If the template has no snapshots, then CloneMode defaults
+ // to FullClone.
+ // When LinkedClone mode is enabled the DiskGiB field is ignored as it is
+ // not possible to expand disks of linked clones.
+ // Defaults to FullClone.
+ // When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.
+ // +optional
+ CloneMode CloneMode `json:"cloneMode,omitempty"`
+}
+
+// CloneMode is the type of clone operation used to clone a VM from a template.
+type CloneMode string
+
+const (
+ // FullClone indicates a VM will have no relationship to the source of the
+ // clone operation once the operation is complete. This is the safest clone
+ // mode, but it is not the fastest.
+ FullClone CloneMode = "fullClone"
+ // LinkedClone means resulting VMs will be dependent upon the snapshot of
+ // the source VM/template from which the VM was cloned. This is the fastest
+ // clone mode, but it also prevents expanding a VMs disk beyond the size of
+ // the source VM/template.
+ LinkedClone CloneMode = "linkedClone"
+)
+
+// NetworkSpec defines the virtual machine's network configuration.
+type NetworkSpec struct {
+ // Devices defines the virtual machine's network interfaces.
+ Devices []NetworkDeviceSpec `json:"devices"`
+}
+
+// AddressesFromPool is an IPAddressPool that will be used to create
+// IPAddressClaims for fulfillment by an external controller.
+type AddressesFromPool struct {
+ // group of the IP address pool type known to an external IPAM controller.
+ // This should be a fully qualified domain name, for example, externalipam.controller.io.
+ // +kubebuilder:example=externalipam.controller.io
+ // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+ // +kubebuilder:validation:Required
+ Group string `json:"group"`
+ // resource of the IP address pool type known to an external IPAM controller.
+ // It is normally the plural form of the resource kind in lowercase, for example,
+ // ippools.
+ // +kubebuilder:example=ippools
+ // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+ // +kubebuilder:validation:Required
+ Resource string `json:"resource"`
+ // name of an IP address pool, for example, pool-config-1.
+ // +kubebuilder:example=pool-config-1
+ // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+// NetworkDeviceSpec defines the network configuration for a virtual machine's
+// network device.
+type NetworkDeviceSpec struct {
+ // networkName is the name of the vSphere network or port group to which the network
+ // device will be connected, for example, port-group-1. When not provided, the vCenter
+ // API will attempt to select a default network.
+ // The available networks (port groups) can be listed using `govc ls 'network/*'`
+ // +kubebuilder:example=port-group-1
+ // +kubebuilder:validation:MaxLength=80
+ // +optional
+ NetworkName string `json:"networkName,omitempty"`
+
+ // gateway is an IPv4 or IPv6 address which represents the subnet gateway,
+ // for example, 192.168.1.1.
+ // +kubebuilder:validation:Format=ipv4
+ // +kubebuilder:validation:Format=ipv6
+ // +kubebuilder:example=192.168.1.1
+ // +kubebuilder:example=2001:DB8:0000:0000:244:17FF:FEB6:D37D
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+
+ // ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to
+ // this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are
+ // intended to allow explicit assignment of a machine's IP address. IP pool configurations
+ // provided via addressesFromPool, however, defer IP address assignment to an external controller.
+ // If both addressesFromPool and ipAddrs are empty or not defined, DHCP will be used to assign
+ // an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with
+ // ipAddrs will be applied first followed by IP addresses from addressesFromPools.
+ // +kubebuilder:validation:Format=ipv4
+ // +kubebuilder:validation:Format=ipv6
+ // +kubebuilder:example=192.168.1.100/24
+ // +kubebuilder:example=2001:DB8:0000:0000:244:17FF:FEB6:D37D/64
+ // +optional
+ IPAddrs []string `json:"ipAddrs,omitempty"`
+
+ // nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example,
+ // 8.8.8.8. a nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the
+ // source of IP addresses for this network device, nameservers should include a valid nameserver.
+ // +kubebuilder:validation:Format=ipv4
+ // +kubebuilder:validation:Format=ipv6
+ // +kubebuilder:example=8.8.8.8
+ // +optional
+ Nameservers []string `json:"nameservers,omitempty"`
+
+ // addressesFromPools is a list of references to IP pool types and instances which are handled
+ // by an external controller. addressesFromPool configurations provided via addressesFromPools
+ // defer IP address assignment to an external controller. IP addresses provided via ipAddrs,
+ // however, are intended to allow explicit assignment of a machine's IP address. If both
+ // addressesFromPool and ipAddrs are empty or not defined, DHCP will assign an IP address.
+ // If both ipAddrs and addressesFromPools are defined, the IP addresses associated with
+ // ipAddrs will be applied first followed by IP addresses from addressesFromPools.
+ // +kubebuilder:validation:Format=ipv4
+ // +optional
+ AddressesFromPools []AddressesFromPool `json:"addressesFromPools,omitempty"`
+}
+
+ // Workspace defines a workspace configuration for the vSphere cloud
+// provider.
+type Workspace struct {
+ // Server is the IP address or FQDN of the vSphere endpoint.
+ // +optional
+ Server string `gcfg:"server,omitempty" json:"server,omitempty"`
+ // Datacenter is the datacenter in which VMs are created/located.
+ // +optional
+ Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"`
+ // Folder is the folder in which VMs are created/located.
+ // +optional
+ Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"`
+ // Datastore is the datastore in which VMs are created/located.
+ // +optional
+ Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"`
+ // ResourcePool is the resource pool in which VMs are created/located.
+ // +optional
+ ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"`
+}
+
+// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains VSphere-specific status information.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type VSphereMachineProviderStatus struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // InstanceID is the ID of the instance in VSphere
+ // +optional
+ InstanceID *string `json:"instanceId,omitempty"`
+ // InstanceState is the provisioning state of the VSphere Instance.
+ // +optional
+ InstanceState *string `json:"instanceState,omitempty"`
+ // Conditions is a set of conditions associated with the Machine to indicate
+ // errors or other status
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+ // TaskRef is a managed object reference to a Task related to the machine.
+ // This value is set automatically at runtime and should not be set or
+ // modified by users.
+ // +optional
+ TaskRef string `json:"taskRef,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..eb8f0941af
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,1882 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.AMI.DeepCopyInto(&out.AMI)
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]TagSpecification, len(*in))
+ copy(*out, *in)
+ }
+ if in.IAMInstanceProfile != nil {
+ in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile
+ *out = new(AWSResourceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.KeyName != nil {
+ in, out := &in.KeyName, &out.KeyName
+ *out = new(string)
+ **out = **in
+ }
+ if in.PublicIP != nil {
+ in, out := &in.PublicIP, &out.PublicIP
+ *out = new(bool)
+ **out = **in
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]AWSResourceReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Subnet.DeepCopyInto(&out.Subnet)
+ out.Placement = in.Placement
+ if in.LoadBalancers != nil {
+ in, out := &in.LoadBalancers, &out.LoadBalancers
+ *out = make([]LoadBalancerReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.BlockDevices != nil {
+ in, out := &in.BlockDevices, &out.BlockDevices
+ *out = make([]BlockDeviceMappingSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SpotMarketOptions != nil {
+ in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
+ *out = new(SpotMarketOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ out.MetadataServiceOptions = in.MetadataServiceOptions
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfig.
+func (in *AWSMachineProviderConfig) DeepCopy() *AWSMachineProviderConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineProviderConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AWSMachineProviderConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineProviderConfigList) DeepCopyInto(out *AWSMachineProviderConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AWSMachineProviderConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfigList.
+func (in *AWSMachineProviderConfigList) DeepCopy() *AWSMachineProviderConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineProviderConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.InstanceID != nil {
+ in, out := &in.InstanceID, &out.InstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.InstanceState != nil {
+ in, out := &in.InstanceState, &out.InstanceState
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderStatus.
+func (in *AWSMachineProviderStatus) DeepCopy() *AWSMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = new(string)
+ **out = **in
+ }
+ if in.ARN != nil {
+ in, out := &in.ARN, &out.ARN
+ *out = new(string)
+ **out = **in
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]Filter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
+func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddressesFromPool) DeepCopyInto(out *AddressesFromPool) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesFromPool.
+func (in *AddressesFromPool) DeepCopy() *AddressesFromPool {
+ if in == nil {
+ return nil
+ }
+ out := new(AddressesFromPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureBootDiagnostics) DeepCopyInto(out *AzureBootDiagnostics) {
+ *out = *in
+ if in.CustomerManaged != nil {
+ in, out := &in.CustomerManaged, &out.CustomerManaged
+ *out = new(AzureCustomerManagedBootDiagnostics)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBootDiagnostics.
+func (in *AzureBootDiagnostics) DeepCopy() *AzureBootDiagnostics {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureBootDiagnostics)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureCustomerManagedBootDiagnostics) DeepCopyInto(out *AzureCustomerManagedBootDiagnostics) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCustomerManagedBootDiagnostics.
+func (in *AzureCustomerManagedBootDiagnostics) DeepCopy() *AzureCustomerManagedBootDiagnostics {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureCustomerManagedBootDiagnostics)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureDiagnostics) DeepCopyInto(out *AzureDiagnostics) {
+ *out = *in
+ if in.Boot != nil {
+ in, out := &in.Boot, &out.Boot
+ *out = new(AzureBootDiagnostics)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiagnostics.
+func (in *AzureDiagnostics) DeepCopy() *AzureDiagnostics {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureDiagnostics)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(v1.SecretReference)
+ **out = **in
+ }
+ out.Image = in.Image
+ in.OSDisk.DeepCopyInto(&out.OSDisk)
+ if in.DataDisks != nil {
+ in, out := &in.DataDisks, &out.DataDisks
+ *out = make([]DataDisk, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ApplicationSecurityGroups != nil {
+ in, out := &in.ApplicationSecurityGroups, &out.ApplicationSecurityGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NatRule != nil {
+ in, out := &in.NatRule, &out.NatRule
+ *out = new(int64)
+ **out = **in
+ }
+ if in.SpotVMOptions != nil {
+ in, out := &in.SpotVMOptions, &out.SpotVMOptions
+ *out = new(SpotVMOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecurityProfile != nil {
+ in, out := &in.SecurityProfile, &out.SecurityProfile
+ *out = new(SecurityProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Diagnostics.DeepCopyInto(&out.Diagnostics)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderSpec.
+func (in *AzureMachineProviderSpec) DeepCopy() *AzureMachineProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureMachineProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AzureMachineProviderSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.VMID != nil {
+ in, out := &in.VMID, &out.VMID
+ *out = new(string)
+ **out = **in
+ }
+ if in.VMState != nil {
+ in, out := &in.VMState, &out.VMState
+ *out = new(AzureVMState)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderStatus.
+func (in *AzureMachineProviderStatus) DeepCopy() *AzureMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BlockDeviceMappingSpec) DeepCopyInto(out *BlockDeviceMappingSpec) {
+ *out = *in
+ if in.DeviceName != nil {
+ in, out := &in.DeviceName, &out.DeviceName
+ *out = new(string)
+ **out = **in
+ }
+ if in.EBS != nil {
+ in, out := &in.EBS, &out.EBS
+ *out = new(EBSBlockDeviceSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NoDevice != nil {
+ in, out := &in.NoDevice, &out.NoDevice
+ *out = new(string)
+ **out = **in
+ }
+ if in.VirtualName != nil {
+ in, out := &in.VirtualName, &out.VirtualName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingSpec.
+func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BlockDeviceMappingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+ if in == nil {
+ return nil
+ }
+ out := new(Condition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Conditions) DeepCopyInto(out *Conditions) {
+ {
+ in := &in
+ *out = make(Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
+func (in Conditions) DeepCopy() Conditions {
+ if in == nil {
+ return nil
+ }
+ out := new(Conditions)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfidentialVM) DeepCopyInto(out *ConfidentialVM) {
+ *out = *in
+ out.UEFISettings = in.UEFISettings
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialVM.
+func (in *ConfidentialVM) DeepCopy() *ConfidentialVM {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfidentialVM)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataDisk) DeepCopyInto(out *DataDisk) {
+ *out = *in
+ in.ManagedDisk.DeepCopyInto(&out.ManagedDisk)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk.
+func (in *DataDisk) DeepCopy() *DataDisk {
+ if in == nil {
+ return nil
+ }
+ out := new(DataDisk)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DataDiskManagedDiskParameters) DeepCopyInto(out *DataDiskManagedDiskParameters) {
+ *out = *in
+ if in.DiskEncryptionSet != nil {
+ in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet
+ *out = new(DiskEncryptionSetParameters)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskManagedDiskParameters.
+func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(DataDiskManagedDiskParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters.
+func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(DiskEncryptionSetParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DiskSettings) DeepCopyInto(out *DiskSettings) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSettings.
+func (in *DiskSettings) DeepCopy() *DiskSettings {
+ if in == nil {
+ return nil
+ }
+ out := new(DiskSettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) {
+ *out = *in
+ if in.DeleteOnTermination != nil {
+ in, out := &in.DeleteOnTermination, &out.DeleteOnTermination
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Encrypted != nil {
+ in, out := &in.Encrypted, &out.Encrypted
+ *out = new(bool)
+ **out = **in
+ }
+ in.KMSKey.DeepCopyInto(&out.KMSKey)
+ if in.Iops != nil {
+ in, out := &in.Iops, &out.Iops
+ *out = new(int64)
+ **out = **in
+ }
+ if in.VolumeSize != nil {
+ in, out := &in.VolumeSize, &out.VolumeSize
+ *out = new(int64)
+ **out = **in
+ }
+ if in.VolumeType != nil {
+ in, out := &in.VolumeType, &out.VolumeType
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceSpec.
+func (in *EBSBlockDeviceSpec) DeepCopy() *EBSBlockDeviceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EBSBlockDeviceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filter) DeepCopyInto(out *Filter) {
+ *out = *in
+ if in.Values != nil {
+ in, out := &in.Values, &out.Values
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
+ if in == nil {
+ return nil
+ }
+ out := new(Filter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPDisk) DeepCopyInto(out *GCPDisk) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.EncryptionKey != nil {
+ in, out := &in.EncryptionKey, &out.EncryptionKey
+ *out = new(GCPEncryptionKeyReference)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk.
+func (in *GCPDisk) DeepCopy() *GCPDisk {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPDisk)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) {
+ *out = *in
+ if in.KMSKey != nil {
+ in, out := &in.KMSKey, &out.KMSKey
+ *out = new(GCPKMSKeyReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference.
+func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPEncryptionKeyReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPGPUConfig) DeepCopyInto(out *GCPGPUConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPGPUConfig.
+func (in *GCPGPUConfig) DeepCopy() *GCPGPUConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPGPUConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference.
+func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPKMSKeyReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Disks != nil {
+ in, out := &in.Disks, &out.Disks
+ *out = make([]*GCPDisk, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(GCPDisk)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Metadata != nil {
+ in, out := &in.Metadata, &out.Metadata
+ *out = make([]*GCPMetadata, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(GCPMetadata)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.NetworkInterfaces != nil {
+ in, out := &in.NetworkInterfaces, &out.NetworkInterfaces
+ *out = make([]*GCPNetworkInterface, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(GCPNetworkInterface)
+ **out = **in
+ }
+ }
+ }
+ if in.ServiceAccounts != nil {
+ in, out := &in.ServiceAccounts, &out.ServiceAccounts
+ *out = make([]GCPServiceAccount, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.TargetPools != nil {
+ in, out := &in.TargetPools, &out.TargetPools
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GPUs != nil {
+ in, out := &in.GPUs, &out.GPUs
+ *out = make([]GCPGPUConfig, len(*in))
+ copy(*out, *in)
+ }
+ out.ShieldedInstanceConfig = in.ShieldedInstanceConfig
+ if in.ResourceManagerTags != nil {
+ in, out := &in.ResourceManagerTags, &out.ResourceManagerTags
+ *out = make([]ResourceManagerTag, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderSpec.
+func (in *GCPMachineProviderSpec) DeepCopy() *GCPMachineProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPMachineProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GCPMachineProviderSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.InstanceID != nil {
+ in, out := &in.InstanceID, &out.InstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.InstanceState != nil {
+ in, out := &in.InstanceState, &out.InstanceState
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderStatus.
+func (in *GCPMachineProviderStatus) DeepCopy() *GCPMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata.
+func (in *GCPMetadata) DeepCopy() *GCPMetadata {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPMetadata)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface.
+func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPNetworkInterface)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) {
+ *out = *in
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount.
+func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPServiceAccount)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPShieldedInstanceConfig) DeepCopyInto(out *GCPShieldedInstanceConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPShieldedInstanceConfig.
+func (in *GCPShieldedInstanceConfig) DeepCopy() *GCPShieldedInstanceConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPShieldedInstanceConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Image) DeepCopyInto(out *Image) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
+func (in *Image) DeepCopy() *Image {
+ if in == nil {
+ return nil
+ }
+ out := new(Image)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LastOperation) DeepCopyInto(out *LastOperation) {
+ *out = *in
+ if in.Description != nil {
+ in, out := &in.Description, &out.Description
+ *out = new(string)
+ **out = **in
+ }
+ if in.LastUpdated != nil {
+ in, out := &in.LastUpdated, &out.LastUpdated
+ *out = (*in).DeepCopy()
+ }
+ if in.State != nil {
+ in, out := &in.State, &out.State
+ *out = new(string)
+ **out = **in
+ }
+ if in.Type != nil {
+ in, out := &in.Type, &out.Type
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+ if in == nil {
+ return nil
+ }
+ out := new(LastOperation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook.
+func (in *LifecycleHook) DeepCopy() *LifecycleHook {
+ if in == nil {
+ return nil
+ }
+ out := new(LifecycleHook)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHooks) DeepCopyInto(out *LifecycleHooks) {
+ *out = *in
+ if in.PreDrain != nil {
+ in, out := &in.PreDrain, &out.PreDrain
+ *out = make([]LifecycleHook, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreTerminate != nil {
+ in, out := &in.PreTerminate, &out.PreTerminate
+ *out = make([]LifecycleHook, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHooks.
+func (in *LifecycleHooks) DeepCopy() *LifecycleHooks {
+ if in == nil {
+ return nil
+ }
+ out := new(LifecycleHooks)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference.
+func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancerReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Machine) DeepCopyInto(out *Machine) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine.
+func (in *Machine) DeepCopy() *Machine {
+ if in == nil {
+ return nil
+ }
+ out := new(Machine)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Machine) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck.
+func (in *MachineHealthCheck) DeepCopy() *MachineHealthCheck {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineHealthCheck)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineHealthCheck) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MachineHealthCheck, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList.
+func (in *MachineHealthCheckList) DeepCopy() *MachineHealthCheckList {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineHealthCheckList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineHealthCheckList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) {
+ *out = *in
+ in.Selector.DeepCopyInto(&out.Selector)
+ if in.UnhealthyConditions != nil {
+ in, out := &in.UnhealthyConditions, &out.UnhealthyConditions
+ *out = make([]UnhealthyCondition, len(*in))
+ copy(*out, *in)
+ }
+ if in.MaxUnhealthy != nil {
+ in, out := &in.MaxUnhealthy, &out.MaxUnhealthy
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.NodeStartupTimeout != nil {
+ in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.RemediationTemplate != nil {
+ in, out := &in.RemediationTemplate, &out.RemediationTemplate
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec.
+func (in *MachineHealthCheckSpec) DeepCopy() *MachineHealthCheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineHealthCheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) {
+ *out = *in
+ if in.ExpectedMachines != nil {
+ in, out := &in.ExpectedMachines, &out.ExpectedMachines
+ *out = new(int)
+ **out = **in
+ }
+ if in.CurrentHealthy != nil {
+ in, out := &in.CurrentHealthy, &out.CurrentHealthy
+ *out = new(int)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus.
+func (in *MachineHealthCheckStatus) DeepCopy() *MachineHealthCheckStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineHealthCheckStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineList) DeepCopyInto(out *MachineList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Machine, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList.
+func (in *MachineList) DeepCopy() *MachineList {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSet) DeepCopyInto(out *MachineSet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet.
+func (in *MachineSet) DeepCopy() *MachineSet {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineSet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetList) DeepCopyInto(out *MachineSetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MachineSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList.
+func (in *MachineSetList) DeepCopy() *MachineSetList {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineSetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineSetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ in.Selector.DeepCopyInto(&out.Selector)
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec.
+func (in *MachineSetSpec) DeepCopy() *MachineSetSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineSetSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) {
+ *out = *in
+ if in.ErrorReason != nil {
+ in, out := &in.ErrorReason, &out.ErrorReason
+ *out = new(MachineSetStatusError)
+ **out = **in
+ }
+ if in.ErrorMessage != nil {
+ in, out := &in.ErrorMessage, &out.ErrorMessage
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus.
+func (in *MachineSetStatus) DeepCopy() *MachineSetStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineSetStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSpec) DeepCopyInto(out *MachineSpec) {
+ *out = *in
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.LifecycleHooks.DeepCopyInto(&out.LifecycleHooks)
+ if in.Taints != nil {
+ in, out := &in.Taints, &out.Taints
+ *out = make([]v1.Taint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.ProviderSpec.DeepCopyInto(&out.ProviderSpec)
+ if in.ProviderID != nil {
+ in, out := &in.ProviderID, &out.ProviderID
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec.
+func (in *MachineSpec) DeepCopy() *MachineSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
+ *out = *in
+ if in.NodeRef != nil {
+ in, out := &in.NodeRef, &out.NodeRef
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ if in.LastUpdated != nil {
+ in, out := &in.LastUpdated, &out.LastUpdated
+ *out = (*in).DeepCopy()
+ }
+ if in.ErrorReason != nil {
+ in, out := &in.ErrorReason, &out.ErrorReason
+ *out = new(MachineStatusError)
+ **out = **in
+ }
+ if in.ErrorMessage != nil {
+ in, out := &in.ErrorMessage, &out.ErrorMessage
+ *out = new(string)
+ **out = **in
+ }
+ if in.ProviderStatus != nil {
+ in, out := &in.ProviderStatus, &out.ProviderStatus
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]v1.NodeAddress, len(*in))
+ copy(*out, *in)
+ }
+ if in.LastOperation != nil {
+ in, out := &in.LastOperation, &out.LastOperation
+ *out = new(LastOperation)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Phase != nil {
+ in, out := &in.Phase, &out.Phase
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(Conditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus.
+func (in *MachineStatus) DeepCopy() *MachineStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) {
+ *out = *in
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec.
+func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetadataServiceOptions) DeepCopyInto(out *MetadataServiceOptions) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServiceOptions.
+func (in *MetadataServiceOptions) DeepCopy() *MetadataServiceOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(MetadataServiceOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) {
+ *out = *in
+ if in.IPAddrs != nil {
+ in, out := &in.IPAddrs, &out.IPAddrs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Nameservers != nil {
+ in, out := &in.Nameservers, &out.Nameservers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.AddressesFromPools != nil {
+ in, out := &in.AddressesFromPools, &out.AddressesFromPools
+ *out = make([]AddressesFromPool, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec.
+func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkDeviceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+ if in.Devices != nil {
+ in, out := &in.Devices, &out.Devices
+ *out = make([]NetworkDeviceSpec, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSDisk) DeepCopyInto(out *OSDisk) {
+ *out = *in
+ in.ManagedDisk.DeepCopyInto(&out.ManagedDisk)
+ out.DiskSettings = in.DiskSettings
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
+func (in *OSDisk) DeepCopy() *OSDisk {
+ if in == nil {
+ return nil
+ }
+ out := new(OSDisk)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSDiskManagedDiskParameters) DeepCopyInto(out *OSDiskManagedDiskParameters) {
+ *out = *in
+ if in.DiskEncryptionSet != nil {
+ in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet
+ *out = new(DiskEncryptionSetParameters)
+ **out = **in
+ }
+ out.SecurityProfile = in.SecurityProfile
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDiskManagedDiskParameters.
+func (in *OSDiskManagedDiskParameters) DeepCopy() *OSDiskManagedDiskParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(OSDiskManagedDiskParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.OwnerReferences != nil {
+ in, out := &in.OwnerReferences, &out.OwnerReferences
+ *out = make([]metav1.OwnerReference, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+ if in == nil {
+ return nil
+ }
+ out := new(ObjectMeta)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Placement) DeepCopyInto(out *Placement) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement.
+func (in *Placement) DeepCopy() *Placement {
+ if in == nil {
+ return nil
+ }
+ out := new(Placement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) {
+ *out = *in
+ if in.Value != nil {
+ in, out := &in.Value, &out.Value
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec.
+func (in *ProviderSpec) DeepCopy() *ProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceManagerTag) DeepCopyInto(out *ResourceManagerTag) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceManagerTag.
+func (in *ResourceManagerTag) DeepCopy() *ResourceManagerTag {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceManagerTag)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) {
+ *out = *in
+ if in.EncryptionAtHost != nil {
+ in, out := &in.EncryptionAtHost, &out.EncryptionAtHost
+ *out = new(bool)
+ **out = **in
+ }
+ in.Settings.DeepCopyInto(&out.Settings)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile.
+func (in *SecurityProfile) DeepCopy() *SecurityProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(SecurityProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecuritySettings) DeepCopyInto(out *SecuritySettings) {
+ *out = *in
+ if in.ConfidentialVM != nil {
+ in, out := &in.ConfidentialVM, &out.ConfidentialVM
+ *out = new(ConfidentialVM)
+ **out = **in
+ }
+ if in.TrustedLaunch != nil {
+ in, out := &in.TrustedLaunch, &out.TrustedLaunch
+ *out = new(TrustedLaunch)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettings.
+func (in *SecuritySettings) DeepCopy() *SecuritySettings {
+ if in == nil {
+ return nil
+ }
+ out := new(SecuritySettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) {
+ *out = *in
+ if in.MaxPrice != nil {
+ in, out := &in.MaxPrice, &out.MaxPrice
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions.
+func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(SpotMarketOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpotVMOptions) DeepCopyInto(out *SpotVMOptions) {
+ *out = *in
+ if in.MaxPrice != nil {
+ in, out := &in.MaxPrice, &out.MaxPrice
+ x := (*in).DeepCopy()
+ *out = &x
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotVMOptions.
+func (in *SpotVMOptions) DeepCopy() *SpotVMOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(SpotVMOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagSpecification) DeepCopyInto(out *TagSpecification) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecification.
+func (in *TagSpecification) DeepCopy() *TagSpecification {
+ if in == nil {
+ return nil
+ }
+ out := new(TagSpecification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrustedLaunch) DeepCopyInto(out *TrustedLaunch) {
+ *out = *in
+ out.UEFISettings = in.UEFISettings
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedLaunch.
+func (in *TrustedLaunch) DeepCopy() *TrustedLaunch {
+ if in == nil {
+ return nil
+ }
+ out := new(TrustedLaunch)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UEFISettings) DeepCopyInto(out *UEFISettings) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UEFISettings.
+func (in *UEFISettings) DeepCopy() *UEFISettings {
+ if in == nil {
+ return nil
+ }
+ out := new(UEFISettings)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) {
+ *out = *in
+ out.Timeout = in.Timeout
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition.
+func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(UnhealthyCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VMDiskSecurityProfile) DeepCopyInto(out *VMDiskSecurityProfile) {
+ *out = *in
+ out.DiskEncryptionSet = in.DiskEncryptionSet
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMDiskSecurityProfile.
+func (in *VMDiskSecurityProfile) DeepCopy() *VMDiskSecurityProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(VMDiskSecurityProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.UserDataSecret != nil {
+ in, out := &in.UserDataSecret, &out.UserDataSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.CredentialsSecret != nil {
+ in, out := &in.CredentialsSecret, &out.CredentialsSecret
+ *out = new(v1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Workspace != nil {
+ in, out := &in.Workspace, &out.Workspace
+ *out = new(Workspace)
+ **out = **in
+ }
+ in.Network.DeepCopyInto(&out.Network)
+ if in.TagIDs != nil {
+ in, out := &in.TagIDs, &out.TagIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderSpec.
+func (in *VSphereMachineProviderSpec) DeepCopy() *VSphereMachineProviderSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereMachineProviderSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.InstanceID != nil {
+ in, out := &in.InstanceID, &out.InstanceID
+ *out = new(string)
+ **out = **in
+ }
+ if in.InstanceState != nil {
+ in, out := &in.InstanceState, &out.InstanceState
+ *out = new(string)
+ **out = **in
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus.
+func (in *VSphereMachineProviderStatus) DeepCopy() *VSphereMachineProviderStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereMachineProviderStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Workspace) DeepCopyInto(out *Workspace) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace.
+func (in *Workspace) DeepCopy() *Workspace {
+ if in == nil {
+ return nil
+ }
+ out := new(Workspace)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..34e093b258
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,138 @@
+machines.machine.openshift.io:
+ Annotations:
+ exclude.release.openshift.io/internal-openshift-hosted: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/948
+ CRDName: machines.machine.openshift.io
+ Capability: MachineAPI
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: machine-api
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: machine.openshift.io
+ HasStatus: true
+ KindName: Machine
+ Labels: {}
+ PluralName: machines
+ PrinterColumns:
+ - description: Phase of machine
+ jsonPath: .status.phase
+ name: Phase
+ type: string
+ - description: Type of instance
+ jsonPath: .metadata.labels['machine\.openshift\.io/instance-type']
+ name: Type
+ type: string
+ - description: Region associated with machine
+ jsonPath: .metadata.labels['machine\.openshift\.io/region']
+ name: Region
+ type: string
+ - description: Zone associated with machine
+ jsonPath: .metadata.labels['machine\.openshift\.io/zone']
+ name: Zone
+ type: string
+ - description: Machine age
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Node associated with machine
+ jsonPath: .status.nodeRef.name
+ name: Node
+ priority: 1
+ type: string
+ - description: Provider ID of machine created in cloud provider
+ jsonPath: .spec.providerID
+ name: ProviderID
+ priority: 1
+ type: string
+ - description: State of instance
+ jsonPath: .metadata.annotations['machine\.openshift\.io/instance-state']
+ name: State
+ priority: 1
+ type: string
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1beta1
+
+machinehealthchecks.machine.openshift.io:
+ Annotations:
+ exclude.release.openshift.io/internal-openshift-hosted: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1032
+ CRDName: machinehealthchecks.machine.openshift.io
+ Capability: MachineAPI
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: machine-api
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: machine.openshift.io
+ HasStatus: true
+ KindName: MachineHealthCheck
+ Labels: {}
+ PluralName: machinehealthchecks
+ PrinterColumns:
+ - description: Maximum number of unhealthy machines allowed
+ jsonPath: .spec.maxUnhealthy
+ name: MaxUnhealthy
+ type: string
+ - description: Number of machines currently monitored
+ jsonPath: .status.expectedMachines
+ name: ExpectedMachines
+ type: integer
+ - description: Current observed healthy machines
+ jsonPath: .status.currentHealthy
+ name: CurrentHealthy
+ type: integer
+ Scope: Namespaced
+ ShortNames:
+ - mhc
+ - mhcs
+ TopLevelFeatureGates: []
+ Version: v1beta1
+
+machinesets.machine.openshift.io:
+ Annotations:
+ exclude.release.openshift.io/internal-openshift-hosted: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1032
+ CRDName: machinesets.machine.openshift.io
+ Capability: MachineAPI
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: machine-api
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: machine.openshift.io
+ HasStatus: true
+ KindName: MachineSet
+ Labels: {}
+ PluralName: machinesets
+ PrinterColumns:
+ - description: Desired Replicas
+ jsonPath: .spec.replicas
+ name: Desired
+ type: integer
+ - description: Current Replicas
+ jsonPath: .status.replicas
+ name: Current
+ type: integer
+ - description: Ready Replicas
+ jsonPath: .status.readyReplicas
+ name: Ready
+ type: integer
+ - description: Observed number of available replicas
+ jsonPath: .status.availableReplicas
+ name: Available
+ type: string
+ - description: Machineset age
+ jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1beta1
+
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..e8309b321d
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,822 @@
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AWSMachineProviderConfig = map[string]string{
+ "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "ami": "AMI is the reference to the AMI from which to create the machine instance.",
+ "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge",
+ "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.",
+ "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance",
+ "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+ "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.",
+ "keyName": "KeyName is the name of the KeyPair to use for SSH",
+ "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.",
+ "publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.",
+ "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.",
+ "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.",
+ "subnet": "Subnet is a reference to the subnet to use for this instance",
+ "placement": "Placement specifies where to create the instance in AWS",
+ "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.",
+ "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html",
+ "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.",
+ "metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html",
+ "placementGroupName": "PlacementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.",
+}
+
+func (AWSMachineProviderConfig) SwaggerDoc() map[string]string {
+ return map_AWSMachineProviderConfig
+}
+
+var map_AWSMachineProviderConfigList = map[string]string{
+ "": "AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+}
+
+func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string {
+ return map_AWSMachineProviderConfigList
+}
+
+var map_AWSMachineProviderStatus = map[string]string{
+ "": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "instanceId": "InstanceID is the instance ID of the machine created in AWS",
+ "instanceState": "InstanceState is the state of the AWS instance for this machine",
+ "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+}
+
+func (AWSMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_AWSMachineProviderStatus
+}
+
+var map_AWSResourceReference = map[string]string{
+ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.",
+ "id": "ID of resource",
+ "arn": "ARN of resource",
+ "filters": "Filters is a set of filters used to identify a resource",
+}
+
+func (AWSResourceReference) SwaggerDoc() map[string]string {
+ return map_AWSResourceReference
+}
+
+var map_BlockDeviceMappingSpec = map[string]string{
+ "": "BlockDeviceMappingSpec describes a block device mapping",
+ "deviceName": "The device name exposed to the machine (for example, /dev/sdh or xvdh).",
+ "ebs": "Parameters used to automatically set up EBS volumes when the machine is launched.",
+ "noDevice": "Suppresses the specified device included in the block device mapping of the AMI.",
+ "virtualName": "The virtual device name (ephemeralN). Machine store volumes are numbered starting from 0. An machine type with 2 available machine store volumes can specify mappings for ephemeral0 and ephemeral1.The number of available machine store volumes depends on the machine type. After you connect to the machine, you must mount the volume.\n\nConstraints: For M3 machines, you must specify machine store volumes in the block device mapping for the machine. When you launch an M3 machine, we ignore any machine store volumes specified in the block device mapping for the AMI.",
+}
+
+func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string {
+ return map_BlockDeviceMappingSpec
+}
+
+var map_EBSBlockDeviceSpec = map[string]string{
+ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice",
+ "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.",
+ "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.",
+ "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.",
+ "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.",
+ "volumeSize": "The size of the volume, in GiB.\n\nConstraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n\nDefault: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.",
+ "volumeType": "The volume type: gp2, io1, st1, sc1, or standard. Default: standard",
+}
+
+func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string {
+ return map_EBSBlockDeviceSpec
+}
+
+var map_Filter = map[string]string{
+ "": "Filter is a filter used to identify an AWS resource",
+ "name": "Name of the filter. Filter names are case-sensitive.",
+ "values": "Values includes one or more filter values. Filter values are case-sensitive.",
+}
+
+func (Filter) SwaggerDoc() map[string]string {
+ return map_Filter
+}
+
+var map_LoadBalancerReference = map[string]string{
+ "": "LoadBalancerReference is a reference to a load balancer on AWS.",
+}
+
+func (LoadBalancerReference) SwaggerDoc() map[string]string {
+ return map_LoadBalancerReference
+}
+
+var map_MetadataServiceOptions = map[string]string{
+ "": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.",
+ "authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html",
+}
+
+func (MetadataServiceOptions) SwaggerDoc() map[string]string {
+ return map_MetadataServiceOptions
+}
+
+var map_Placement = map[string]string{
+ "": "Placement indicates where to create the instance in AWS",
+ "region": "Region is the region to use to create the instance",
+ "availabilityZone": "AvailabilityZone is the availability zone of the instance",
+ "tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.",
+}
+
+func (Placement) SwaggerDoc() map[string]string {
+ return map_Placement
+}
+
+var map_SpotMarketOptions = map[string]string{
+ "": "SpotMarketOptions defines the options available to a user when configuring Machines to run on Spot instances. Most users should provide an empty struct.",
+ "maxPrice": "The maximum price the user is willing to pay for their instances Default: On-Demand price",
+}
+
+func (SpotMarketOptions) SwaggerDoc() map[string]string {
+ return map_SpotMarketOptions
+}
+
+var map_TagSpecification = map[string]string{
+ "": "TagSpecification is the name/value pair for a tag",
+ "name": "Name of the tag",
+ "value": "Value of the tag",
+}
+
+func (TagSpecification) SwaggerDoc() map[string]string {
+ return map_TagSpecification
+}
+
+var map_AzureBootDiagnostics = map[string]string{
+ "": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+ "storageAccountType": "StorageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).",
+ "customerManaged": "CustomerManaged provides reference to the customer manager storage account.",
+}
+
+func (AzureBootDiagnostics) SwaggerDoc() map[string]string {
+ return map_AzureBootDiagnostics
+}
+
+var map_AzureCustomerManagedBootDiagnostics = map[string]string{
+ "": "AzureCustomerManagedBootDiagnostics provides reference to a customer managed storage account.",
+ "storageAccountURI": "StorageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.",
+}
+
+func (AzureCustomerManagedBootDiagnostics) SwaggerDoc() map[string]string {
+ return map_AzureCustomerManagedBootDiagnostics
+}
+
+var map_AzureDiagnostics = map[string]string{
+ "": "AzureDiagnostics is used to configure the diagnostic settings of the virtual machine.",
+ "boot": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+}
+
+func (AzureDiagnostics) SwaggerDoc() map[string]string {
+ return map_AzureDiagnostics
+}
+
+var map_AzureMachineProviderSpec = map[string]string{
+ "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+ "credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.",
+ "location": "Location is the region to use to create the instance",
+ "vmSize": "VMSize is the size of the VM to create.",
+ "image": "Image is the OS image to use to create the instance.",
+ "osDisk": "OSDisk represents the parameters for creating the OS disk.",
+ "dataDisks": "DataDisk specifies the parameters that are used to add one or more data disks to the machine.",
+ "sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.",
+ "publicIP": "PublicIP if true a public IP will be used",
+ "tags": "Tags is a list of tags to apply to the machine.",
+ "securityGroup": "Network Security Group that needs to be attached to the machine's interface. No security group will be attached if empty.",
+ "applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.",
+ "subnet": "Subnet to use for this instance",
+ "publicLoadBalancer": "PublicLoadBalancer to use for this instance",
+ "internalLoadBalancer": "InternalLoadBalancerName to use for this instance",
+ "natRule": "NatRule to set inbound NAT rule of the load balancer",
+ "managedIdentity": "ManagedIdentity to set managed identity name",
+ "vnet": "Vnet to set virtual network name",
+ "zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone",
+ "networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network",
+ "resourceGroup": "ResourceGroup is the resource group for the virtual machine",
+ "spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM",
+ "securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.",
+ "ultraSSDCapability": "UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.",
+ "acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.",
+ "availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.",
+ "diagnostics": "Diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+ "capacityReservationGroupID": "capacityReservationGroupID specifies the capacity reservation group resource id that should be used for allocating the virtual machine. The field size should be greater than 0 and the field input must start with '/'. The input for capacityReservationGroupID must be similar to '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'. The keys which are used should be among 'subscriptions', 'providers' and 'resourcegroups' followed by valid ID or names respectively.",
+}
+
+func (AzureMachineProviderSpec) SwaggerDoc() map[string]string {
+ return map_AzureMachineProviderSpec
+}
+
+var map_AzureMachineProviderStatus = map[string]string{
+ "": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "vmId": "VMID is the ID of the virtual machine created in Azure.",
+ "vmState": "VMState is the provisioning state of the Azure virtual machine.",
+ "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.",
+}
+
+func (AzureMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_AzureMachineProviderStatus
+}
+
+var map_ConfidentialVM = map[string]string{
+ "": "ConfidentialVM defines the UEFI settings for the virtual machine.",
+ "uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+}
+
+func (ConfidentialVM) SwaggerDoc() map[string]string {
+ return map_ConfidentialVM
+}
+
+var map_DataDisk = map[string]string{
+ "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.",
+ "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.",
+ "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.",
+ "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".",
+ "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.",
+ "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.",
+ "deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.",
+}
+
+func (DataDisk) SwaggerDoc() map[string]string {
+ return map_DataDisk
+}
+
+var map_DataDiskManagedDiskParameters = map[string]string{
+ "": "DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.",
+ "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".",
+ "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".",
+}
+
+func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string {
+ return map_DataDiskManagedDiskParameters
+}
+
+var map_DiskEncryptionSetParameters = map[string]string{
+ "": "DiskEncryptionSetParameters is the disk encryption set properties",
+ "id": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".",
+}
+
+func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string {
+ return map_DiskEncryptionSetParameters
+}
+
+var map_DiskSettings = map[string]string{
+ "": "DiskSettings describe ephemeral disk settings for the os disk.",
+ "ephemeralStorageLocation": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.",
+}
+
+func (DiskSettings) SwaggerDoc() map[string]string {
+ return map_DiskSettings
+}
+
+var map_Image = map[string]string{
+ "": "Image is a mirror of azure sdk compute.ImageReference",
+ "publisher": "Publisher is the name of the organization that created the image",
+ "offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer",
+ "sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter",
+ "version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.",
+ "resourceID": "ResourceID specifies an image to use by ID",
+ "type": "Type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+ return map_Image
+}
+
+var map_OSDisk = map[string]string{
+ "osType": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".",
+ "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.",
+ "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.",
+ "diskSettings": "DiskSettings describe ephemeral disk settings for the os disk.",
+ "cachingType": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.",
+}
+
+func (OSDisk) SwaggerDoc() map[string]string {
+ return map_OSDisk
+}
+
+var map_OSDiskManagedDiskParameters = map[string]string{
+ "": "OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.",
+ "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".",
+ "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties",
+ "securityProfile": "securityProfile specifies the security profile for the managed disk.",
+}
+
+func (OSDiskManagedDiskParameters) SwaggerDoc() map[string]string {
+ return map_OSDiskManagedDiskParameters
+}
+
+var map_SecurityProfile = map[string]string{
+ "": "SecurityProfile specifies the Security profile settings for a virtual machine or virtual machine scale set.",
+ "encryptionAtHost": "encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState. Default is disabled.",
+ "settings": "settings specify the security type and the UEFI settings of the virtual machine. This field can be set for Confidential VMs and Trusted Launch for VMs.",
+}
+
+func (SecurityProfile) SwaggerDoc() map[string]string {
+ return map_SecurityProfile
+}
+
+var map_SecuritySettings = map[string]string{
+ "": "SecuritySettings define the security type and the UEFI settings of the virtual machine.",
+ "securityType": "securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set.",
+ "confidentialVM": "confidentialVM specifies the security configuration of the virtual machine. For more information regarding Confidential VMs, please refer to: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview",
+ "trustedLaunch": "trustedLaunch specifies the security configuration of the virtual machine. For more information regarding TrustedLaunch for VMs, please refer to: https://learn.microsoft.com/azure/virtual-machines/trusted-launch",
+}
+
+func (SecuritySettings) SwaggerDoc() map[string]string {
+ return map_SecuritySettings
+}
+
+var map_SpotVMOptions = map[string]string{
+ "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs",
+ "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances",
+}
+
+func (SpotVMOptions) SwaggerDoc() map[string]string {
+ return map_SpotVMOptions
+}
+
+var map_TrustedLaunch = map[string]string{
+ "": "TrustedLaunch defines the UEFI settings for the virtual machine.",
+ "uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+}
+
+func (TrustedLaunch) SwaggerDoc() map[string]string {
+ return map_TrustedLaunch
+}
+
+var map_UEFISettings = map[string]string{
+ "": "UEFISettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+ "secureBoot": "secureBoot specifies whether secure boot should be enabled on the virtual machine. Secure Boot verifies the digital signature of all boot components and halts the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.",
+ "virtualizedTrustedPlatformModule": "virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be enabled if SecurityEncryptionType is defined. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.",
+}
+
+func (UEFISettings) SwaggerDoc() map[string]string {
+ return map_UEFISettings
+}
+
+var map_VMDiskSecurityProfile = map[string]string{
+ "": "VMDiskSecurityProfile specifies the security profile settings for the managed disk. It can be set only for Confidential VMs.",
+ "diskEncryptionSet": "diskEncryptionSet specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and VMGuest blob.",
+ "securityEncryptionType": "securityEncryptionType specifies the encryption type of the managed disk. It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only. When set to VMGuestStateOnly, the vTPM should be enabled. When set to DiskWithVMGuestState, both SecureBoot and vTPM should be enabled. If the above conditions are not fulfilled, the VM will not be created and the respective error will be returned. It can be set only for Confidential VMs. Confidential VMs are defined by their SecurityProfile.SecurityType being set to ConfidentialVM, the SecurityEncryptionType of their OS disk being set to one of the allowed values and by enabling the respective SecurityProfile.UEFISettings of the VM (i.e. vTPM and SecureBoot), depending on the selected SecurityEncryptionType. For further details on Azure Confidential VMs, please refer to the respective documentation: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview",
+}
+
+func (VMDiskSecurityProfile) SwaggerDoc() map[string]string {
+ return map_VMDiskSecurityProfile
+}
+
+var map_GCPDisk = map[string]string{
+ "": "GCPDisk describes disks for GCP.",
+ "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).",
+ "boot": "Boot indicates if this is a boot disk (default false).",
+ "sizeGb": "SizeGB is the size of the disk (in GB).",
+ "type": "Type is the type of the disk (eg: pd-standard).",
+ "image": "Image is the source image to create this disk.",
+ "labels": "Labels list of labels to apply to the disk.",
+ "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.",
+}
+
+func (GCPDisk) SwaggerDoc() map[string]string {
+ return map_GCPDisk
+}
+
+var map_GCPEncryptionKeyReference = map[string]string{
+ "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.",
+ "kmsKey": "KMSKeyName is the reference KMS key, in the format",
+ "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.",
+}
+
+func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string {
+ return map_GCPEncryptionKeyReference
+}
+
+var map_GCPGPUConfig = map[string]string{
+ "": "GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.",
+ "count": "Count is the number of GPUs to be attached to an instance.",
+ "type": "Type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4",
+}
+
+func (GCPGPUConfig) SwaggerDoc() map[string]string {
+ return map_GCPGPUConfig
+}
+
+var map_GCPKMSKeyReference = map[string]string{
+ "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key",
+ "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.",
+ "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.",
+ "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.",
+ "location": "Location is the GCP location in which the Key Ring exists.",
+}
+
+func (GCPKMSKeyReference) SwaggerDoc() map[string]string {
+ return map_GCPKMSKeyReference
+}
+
+var map_GCPMachineProviderSpec = map[string]string{
+	"":                     "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for a GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+ "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.",
+ "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.",
+ "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.",
+ "disks": "Disks is a list of disks to be attached to the VM.",
+ "labels": "Labels list of labels to apply to the VM.",
+ "gcpMetadata": "Metadata key/value pairs to apply to the VM.",
+ "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.",
+ "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.",
+ "tags": "Tags list of network tags to apply to the VM.",
+ "targetPools": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool",
+ "machineType": "MachineType is the machine type to use for the VM.",
+ "region": "Region is the region in which the GCP machine provider will create the VM.",
+ "zone": "Zone is the zone in which the GCP machine provider will create the VM.",
+ "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.",
+ "gpus": "GPUs is a list of GPUs to be attached to the VM.",
+ "preemptible": "Preemptible indicates if created instance is preemptible.",
+ "onHostMaintenance": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".",
+ "restartPolicy": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api",
+ "shieldedInstanceConfig": "ShieldedInstanceConfig is the Shielded VM configuration for the VM",
+ "confidentialCompute": "confidentialCompute Defines whether the instance should have confidential compute enabled. If enabled OnHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.",
+ "resourceManagerTags": "resourceManagerTags is an optional list of tags to apply to the GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.",
+}
+
+func (GCPMachineProviderSpec) SwaggerDoc() map[string]string {
+ return map_GCPMachineProviderSpec
+}
+
+var map_GCPMachineProviderStatus = map[string]string{
+ "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "instanceId": "InstanceID is the ID of the instance in GCP",
+ "instanceState": "InstanceState is the provisioning state of the GCP Instance.",
+ "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+}
+
+func (GCPMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_GCPMachineProviderStatus
+}
+
+var map_GCPMetadata = map[string]string{
+ "": "GCPMetadata describes metadata for GCP.",
+ "key": "Key is the metadata key.",
+ "value": "Value is the metadata value.",
+}
+
+func (GCPMetadata) SwaggerDoc() map[string]string {
+ return map_GCPMetadata
+}
+
+var map_GCPNetworkInterface = map[string]string{
+ "": "GCPNetworkInterface describes network interfaces for GCP",
+ "publicIP": "PublicIP indicates if true a public IP will be used",
+ "network": "Network is the network name.",
+ "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.",
+ "subnetwork": "Subnetwork is the subnetwork name.",
+}
+
+func (GCPNetworkInterface) SwaggerDoc() map[string]string {
+ return map_GCPNetworkInterface
+}
+
+var map_GCPServiceAccount = map[string]string{
+ "": "GCPServiceAccount describes service accounts for GCP.",
+ "email": "Email is the service account email.",
+ "scopes": "Scopes list of scopes to be assigned to the service account.",
+}
+
+func (GCPServiceAccount) SwaggerDoc() map[string]string {
+ return map_GCPServiceAccount
+}
+
+var map_GCPShieldedInstanceConfig = map[string]string{
+ "": "GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.",
+ "secureBoot": "SecureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.",
+ "virtualizedTrustedPlatformModule": "VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.",
+ "integrityMonitoring": "IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.",
+}
+
+func (GCPShieldedInstanceConfig) SwaggerDoc() map[string]string {
+ return map_GCPShieldedInstanceConfig
+}
+
+var map_ResourceManagerTag = map[string]string{
+ "": "ResourceManagerTag is a tag to apply to GCP resources created for the cluster.",
+ "parentID": "parentID is the ID of the hierarchical resource where the tags are defined e.g. at the Organization or the Project level. To find the Organization or Project ID ref https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects An OrganizationID can have a maximum of 32 characters and must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.",
+ "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.",
+ "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.",
+}
+
+func (ResourceManagerTag) SwaggerDoc() map[string]string {
+ return map_ResourceManagerTag
+}
+
+var map_LastOperation = map[string]string{
+ "": "LastOperation represents the detail of the last performed operation on the MachineObject.",
+ "description": "Description is the human-readable description of the last operation.",
+ "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.",
+ "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc",
+ "type": "Type is the type of operation which was last performed. E.g. Create, Delete, Update etc",
+}
+
+func (LastOperation) SwaggerDoc() map[string]string {
+ return map_LastOperation
+}
+
+var map_LifecycleHook = map[string]string{
+ "": "LifecycleHook represents a single instance of a lifecycle hook",
+	"name":  "Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.",
+ "owner": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.",
+}
+
+func (LifecycleHook) SwaggerDoc() map[string]string {
+ return map_LifecycleHook
+}
+
+var map_LifecycleHooks = map[string]string{
+	"":             "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.",
+ "preDrain": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.",
+	"preTerminate": "PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks will be actioned after the Machine has been drained.",
+}
+
+func (LifecycleHooks) SwaggerDoc() map[string]string {
+ return map_LifecycleHooks
+}
+
+var map_Machine = map[string]string{
+ "": "Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Machine) SwaggerDoc() map[string]string {
+ return map_Machine
+}
+
+var map_MachineList = map[string]string{
+ "": "MachineList contains a list of Machine Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (MachineList) SwaggerDoc() map[string]string {
+ return map_MachineList
+}
+
+var map_MachineSpec = map[string]string{
+ "": "MachineSpec defines the desired state of Machine",
+ "metadata": "ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.",
+ "lifecycleHooks": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.",
+ "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints",
+ "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.",
+ "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.",
+}
+
+func (MachineSpec) SwaggerDoc() map[string]string {
+ return map_MachineSpec
+}
+
+var map_MachineStatus = map[string]string{
+ "": "MachineStatus defines the observed state of Machine",
+ "nodeRef": "NodeRef will point to the corresponding Node if it exists.",
+ "lastUpdated": "LastUpdated identifies when this status was last observed.",
+ "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.",
+ "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.",
+ "providerStatus": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.",
+ "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.",
+ "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.",
+ "phase": "Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting",
+ "conditions": "Conditions defines the current state of the Machine",
+}
+
+func (MachineStatus) SwaggerDoc() map[string]string {
+ return map_MachineStatus
+}
+
+var map_MachineHealthCheck = map[string]string{
+ "": "MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Specification of machine health check policy",
+ "status": "Most recently observed status of MachineHealthCheck resource",
+}
+
+func (MachineHealthCheck) SwaggerDoc() map[string]string {
+ return map_MachineHealthCheck
+}
+
+var map_MachineHealthCheckList = map[string]string{
+ "": "MachineHealthCheckList contains a list of MachineHealthCheck Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (MachineHealthCheckList) SwaggerDoc() map[string]string {
+ return map_MachineHealthCheckList
+}
+
+var map_MachineHealthCheckSpec = map[string]string{
+ "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck",
+ "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.",
+ "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.",
+	"maxUnhealthy":        "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.",
+ "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".",
+ "remediationTemplate": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.",
+}
+
+func (MachineHealthCheckSpec) SwaggerDoc() map[string]string {
+ return map_MachineHealthCheckSpec
+}
+
+var map_MachineHealthCheckStatus = map[string]string{
+ "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck",
+ "expectedMachines": "total number of machines counted by this machine health check",
+	"currentHealthy":      "total number of healthy machines counted by this machine health check",
+ "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied",
+ "conditions": "Conditions defines the current state of the MachineHealthCheck",
+}
+
+func (MachineHealthCheckStatus) SwaggerDoc() map[string]string {
+ return map_MachineHealthCheckStatus
+}
+
+var map_UnhealthyCondition = map[string]string{
+ "": "UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy.",
+ "timeout": "Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".",
+}
+
+func (UnhealthyCondition) SwaggerDoc() map[string]string {
+ return map_UnhealthyCondition
+}
+
+var map_MachineSet = map[string]string{
+ "": "MachineSet ensures that a specified number of machines replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (MachineSet) SwaggerDoc() map[string]string {
+ return map_MachineSet
+}
+
+var map_MachineSetList = map[string]string{
+ "": "MachineSetList contains a list of MachineSet Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (MachineSetList) SwaggerDoc() map[string]string {
+ return map_MachineSetList
+}
+
+var map_MachineSetSpec = map[string]string{
+ "": "MachineSetSpec defines the desired state of MachineSet",
+ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.",
+ "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)",
+	"deletePolicy":    "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random\", \"Newest\", \"Oldest\"",
+ "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.",
+}
+
+func (MachineSetSpec) SwaggerDoc() map[string]string {
+ return map_MachineSetSpec
+}
+
+var map_MachineSetStatus = map[string]string{
+ "": "MachineSetStatus defines the observed state of MachineSet",
+ "replicas": "Replicas is the most recently observed number of replicas.",
+ "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.",
+ "readyReplicas": "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\".",
+ "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.",
+ "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.",
+ "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.",
+}
+
+func (MachineSetStatus) SwaggerDoc() map[string]string {
+ return map_MachineSetStatus
+}
+
+var map_MachineTemplateSpec = map[string]string{
+ "": "MachineTemplateSpec describes the data needed to create a Machine from a template",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (MachineTemplateSpec) SwaggerDoc() map[string]string {
+ return map_MachineTemplateSpec
+}
+
+var map_Condition = map[string]string{
+ "": "Condition defines an observation of a Machine API resource operational state.",
+ "type": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
+ "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.",
+ "message": "A human readable message indicating details about the transition. This field may be empty.",
+}
+
+func (Condition) SwaggerDoc() map[string]string {
+ return map_Condition
+}
+
+var map_ObjectMeta = map[string]string{
+ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. When that happens, this hack should be revisited.",
+ "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+}
+
+func (ObjectMeta) SwaggerDoc() map[string]string {
+ return map_ObjectMeta
+}
+
+var map_ProviderSpec = map[string]string{
+ "": "ProviderSpec defines the configuration to use during node creation.",
+ "value": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.",
+}
+
+func (ProviderSpec) SwaggerDoc() map[string]string {
+ return map_ProviderSpec
+}
+
+var map_AddressesFromPool = map[string]string{
+ "": "AddressesFromPool is an IPAddressPool that will be used to create IPAddressClaims for fulfillment by an external controller.",
+ "group": "group of the IP address pool type known to an external IPAM controller. This should be a fully qualified domain name, for example, externalipam.controller.io.",
+ "resource": "resource of the IP address pool type known to an external IPAM controller. It is normally the plural form of the resource kind in lowercase, for example, ippools.",
+ "name": "name of an IP address pool, for example, pool-config-1.",
+}
+
+func (AddressesFromPool) SwaggerDoc() map[string]string {
+ return map_AddressesFromPool
+}
+
+var map_NetworkDeviceSpec = map[string]string{
+ "": "NetworkDeviceSpec defines the network configuration for a virtual machine's network device.",
+ "networkName": "networkName is the name of the vSphere network or port group to which the network device will be connected, for example, port-group-1. When not provided, the vCenter API will attempt to select a default network. The available networks (port groups) can be listed using `govc ls 'network/*'`",
+ "gateway": "gateway is an IPv4 or IPv6 address which represents the subnet gateway, for example, 192.168.1.1.",
+ "ipAddrs": "ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are intended to allow explicit assignment of a machine's IP address. IP pool configurations provided via addressesFromPool, however, defer IP address assignment to an external controller. If both addressesFromPool and ipAddrs are empty or not defined, DHCP will be used to assign an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with ipAddrs will be applied first followed by IP addresses from addressesFromPools.",
+ "nameservers": "nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example, 8.8.8.8. a nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the source of IP addresses for this network device, nameservers should include a valid nameserver.",
+ "addressesFromPools": "addressesFromPools is a list of references to IP pool types and instances which are handled by an external controller. addressesFromPool configurations provided via addressesFromPools defer IP address assignment to an external controller. IP addresses provided via ipAddrs, however, are intended to allow explicit assignment of a machine's IP address. If both addressesFromPool and ipAddrs are empty or not defined, DHCP will assign an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with ipAddrs will be applied first followed by IP addresses from addressesFromPools.",
+}
+
+func (NetworkDeviceSpec) SwaggerDoc() map[string]string {
+ return map_NetworkDeviceSpec
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec defines the virtual machine's network configuration.",
+ "devices": "Devices defines the virtual machine's network interfaces.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_VSphereMachineProviderSpec = map[string]string{
+ "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+ "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.",
+ "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.",
+ "workspace": "Workspace describes the workspace to use for the machine.",
+ "network": "Network is the network configuration for this machine's VM.",
+ "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.",
+ "numCoresPerSocket": "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.",
+ "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.",
+ "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.",
+ "tagIDs": "tagIDs is an optional set of tags to add to an instance. Specified tagIDs must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified.",
+ "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned",
+ "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.",
+}
+
+func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string {
+ return map_VSphereMachineProviderSpec
+}
+
+var map_VSphereMachineProviderStatus = map[string]string{
+ "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+ "instanceId": "InstanceID is the ID of the instance in VSphere",
+ "instanceState": "InstanceState is the provisioning state of the VSphere Instance.",
+ "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+ "taskRef": "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.",
+}
+
+func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string {
+ return map_VSphereMachineProviderStatus
+}
+
+var map_Workspace = map[string]string{
+ "": "WorkspaceConfig defines a workspace configuration for the vSphere cloud provider.",
+ "server": "Server is the IP address or FQDN of the vSphere endpoint.",
+ "datacenter": "Datacenter is the datacenter in which VMs are created/located.",
+ "folder": "Folder is the folder in which VMs are created/located.",
+ "datastore": "Datastore is the datastore in which VMs are created/located.",
+ "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.",
+}
+
+func (Workspace) SwaggerDoc() map[string]string {
+ return map_Workspace
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/monitoring/.codegen.yaml b/vendor/github.com/openshift/api/monitoring/.codegen.yaml
new file mode 100644
index 0000000000..b865f353b6
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/.codegen.yaml
@@ -0,0 +1,8 @@
+schemapatch:
+ requiredFeatureSets:
+ - ""
+ - "Default"
+ - "TechPreviewNoUpgrade"
+swaggerdocs:
+ disabled: false
+ commentPolicy: Enforce
diff --git a/vendor/github.com/openshift/api/monitoring/install.go b/vendor/github.com/openshift/api/monitoring/install.go
new file mode 100644
index 0000000000..cc34a01dcf
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/install.go
@@ -0,0 +1,26 @@
+package monitoring
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ monitoringv1 "github.com/openshift/api/monitoring/v1"
+)
+
+const (
+ GroupName = "monitoring.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(monitoringv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/monitoring/v1/Makefile b/vendor/github.com/openshift/api/monitoring/v1/Makefile
new file mode 100644
index 0000000000..0a7a62e1c8
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="monitoring.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/monitoring/v1/doc.go b/vendor/github.com/openshift/api/monitoring/v1/doc.go
new file mode 100644
index 0000000000..bf046d6ef9
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/doc.go
@@ -0,0 +1,6 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=monitoring.openshift.io
+package v1
diff --git a/vendor/github.com/openshift/api/monitoring/v1/register.go b/vendor/github.com/openshift/api/monitoring/v1/register.go
new file mode 100644
index 0000000000..342c4cca21
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/register.go
@@ -0,0 +1,41 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "monitoring.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &AlertingRule{},
+ &AlertingRuleList{},
+ &AlertRelabelConfig{},
+ &AlertRelabelConfigList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/monitoring/v1/types.go b/vendor/github.com/openshift/api/monitoring/v1/types.go
new file mode 100644
index 0000000000..111538ba78
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/types.go
@@ -0,0 +1,373 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// AlertingRule represents a set of user-defined Prometheus rule groups containing
+// alerting rules. This resource is the supported method for cluster admins to
+// create alerts based on metrics recorded by the platform monitoring stack in
+// OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring
+// namespace. You might use this to create custom alerting rules not shipped with
+// OpenShift based on metrics from components such as the node_exporter, which
+// provides machine-level metrics such as CPU usage, or kube-state-metrics, which
+// provides metrics on Kubernetes usage.
+//
+// The API is mostly compatible with the upstream PrometheusRule type from the
+// prometheus-operator. The primary difference being that recording rules are not
+// allowed here -- only alerting rules. For each AlertingRule resource created, a
+// corresponding PrometheusRule will be created in the openshift-monitoring
+// namespace. OpenShift requires admins to use the AlertingRule resource rather
+// than the upstream type in order to allow better OpenShift specific defaulting
+// and validation, while not modifying the upstream APIs directly.
+//
+// You can find upstream API documentation for PrometheusRule resources here:
+//
+// https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +genclient
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=alertingrules,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1406
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=monitoring,operatorOrdering=01
+// +kubebuilder:metadata:annotations="description=OpenShift Monitoring alerting rules"
+type AlertingRule struct {
+ metav1.TypeMeta `json:",inline"`
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec describes the desired state of this AlertingRule object.
+ // +kubebuilder:validation:Required
+ Spec AlertingRuleSpec `json:"spec"`
+
+ // status describes the current state of this AlertingRule object.
+ //
+ // +optional
+ Status AlertingRuleStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AlertingRuleList is a list of AlertingRule objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type AlertingRuleList struct {
+ metav1.TypeMeta `json:",inline"`
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // items is a list of AlertingRule objects.
+ // +kubebuilder:validation:Required
+ Items []AlertingRule `json:"items"`
+}
+
+// AlertingRuleSpec is the desired state of an AlertingRule resource.
+//
+// +k8s:openapi-gen=true
+type AlertingRuleSpec struct {
+ // groups is a list of grouped alerting rules. Rule groups are the unit at
+ // which Prometheus parallelizes rule processing. All rules in a single group
+ // share a configured evaluation interval. All rules in the group will be
+ // processed together on this interval, sequentially, and all rules will be
+ // processed.
+ //
+ // It's common to group related alerting rules into a single AlertingRule
+ // resources, and within that resource, closely related alerts, or simply
+ // alerts with the same interval, into individual groups. You are also free
+ // to create AlertingRule resources with only a single rule group, but be
+ // aware that this can have a performance impact on Prometheus if the group is
+ // extremely large or has very complex query expressions to evaluate.
+ // Spreading very complex rules across multiple groups to allow them to be
+ // processed in parallel is also a common use-case.
+ //
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:Required
+ Groups []RuleGroup `json:"groups"`
+}
+
+// Duration is a valid prometheus time duration.
+// Supported units: y, w, d, h, m, s, ms
+// Examples: `30s`, `1m`, `1h20m15s`, `15d`
+// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$"
+// +kubebuilder:validation:MaxLength=2048
+type Duration string
+
+// RuleGroup is a list of sequentially evaluated alerting rules.
+//
+// +k8s:openapi-gen=true
+type RuleGroup struct {
+ // name is the name of the group.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ Name string `json:"name"`
+
+ // interval is how often rules in the group are evaluated. If not specified,
+ // it defaults to the global.evaluation_interval configured in Prometheus,
+ // which itself defaults to 30 seconds. You can check if this value has been
+ // modified from the default on your cluster by inspecting the platform
+ // Prometheus configuration:
+ // The relevant field in that resource is: spec.evaluationInterval
+ //
+ // +optional
+ Interval Duration `json:"interval,omitempty"`
+
+ // rules is a list of sequentially evaluated alerting rules. Prometheus may
+ // process rule groups in parallel, but rules within a single group are always
+ // processed sequentially, and all rules are processed.
+ //
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:Required
+ Rules []Rule `json:"rules"`
+}
+
+// Rule describes an alerting rule.
+// See Prometheus documentation:
+// - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules
+//
+// +k8s:openapi-gen=true
+type Rule struct {
+ // alert is the name of the alert. Must be a valid label value, i.e. may
+ // contain any Unicode character.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ Alert string `json:"alert"`
+
+ // expr is the PromQL expression to evaluate. Every evaluation cycle this is
+ // evaluated at the current time, and all resultant time series become pending
+ // or firing alerts. This is most often a string representing a PromQL
+ // expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr
+ // In rare cases this could be a simple integer, e.g. a simple "1" if the
+ // intent is to create an alert that is always firing. This is sometimes used
+ // to create an always-firing "Watchdog" alert in order to ensure the alerting
+ // pipeline is functional.
+ //
+ // +kubebuilder:validation:Required
+ Expr intstr.IntOrString `json:"expr"`
+
+ // for is the time period after which alerts are considered firing after first
+ // returning results. Alerts which have not yet fired for long enough are
+ // considered pending.
+ //
+ // +optional
+ For Duration `json:"for,omitempty"`
+
+ // labels to add or overwrite for each alert. The results of the PromQL
+ // expression for the alert will result in an existing set of labels for the
+ // alert, after evaluating the expression, for any label specified here with
+ // the same name as a label in that set, the label here wins and overwrites
+ // the previous value. These should typically be short identifying values
+ // that may be useful to query against. A common example is the alert
+ // severity, where one sets `severity: warning` under the `labels` key:
+ //
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // annotations to add to each alert. These are values that can be used to
+ // store longer additional information that you won't query on, such as alert
+ // descriptions or runbook links.
+ //
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// AlertingRuleStatus is the status of an AlertingRule resource.
+type AlertingRuleStatus struct {
+ // observedGeneration is the last generation change you've dealt with.
+ //
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // prometheusRule is the generated PrometheusRule for this AlertingRule. Each
+ // AlertingRule instance results in a generated PrometheusRule object in the
+ // same namespace, which is always the openshift-monitoring namespace.
+ //
+ // +optional
+ PrometheusRule PrometheusRuleRef `json:"prometheusRule,omitempty"`
+}
+
+// PrometheusRuleRef is a reference to an existing PrometheusRule object. Each
+// AlertingRule instance results in a generated PrometheusRule object in the same
+// namespace, which is always the openshift-monitoring namespace. This is used to
+// point to the generated PrometheusRule object in the AlertingRule status.
+type PrometheusRuleRef struct {
+ // This is a struct so that we can support future expansion of fields within
+ // the reference should we ever need to.
+
+ // name of the referenced PrometheusRule.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=2048
+ Name string `json:"name"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+
+// AlertRelabelConfig defines a set of relabel configs for alerts.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=alertrelabelconfigs,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1406
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=monitoring,operatorOrdering=02
+// +kubebuilder:metadata:annotations="description=OpenShift Monitoring alert relabel configurations"
+type AlertRelabelConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec describes the desired state of this AlertRelabelConfig object.
+ // +kubebuilder:validation:Required
+ Spec AlertRelabelConfigSpec `json:"spec"`
+
+ // status describes the current state of this AlertRelabelConfig object.
+ //
+ // +optional
+ Status AlertRelabelConfigStatus `json:"status,omitempty"`
+}
+
+ // AlertRelabelConfigSpec is the desired state of an AlertRelabelConfig resource.
+//
+// +k8s:openapi-gen=true
+type AlertRelabelConfigSpec struct {
+ // configs is a list of sequentially evaluated alert relabel configs.
+ //
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:Required
+ Configs []RelabelConfig `json:"configs"`
+}
+
+// AlertRelabelConfigStatus is the status of an AlertRelabelConfig resource.
+type AlertRelabelConfigStatus struct {
+ // conditions contains details on the state of the AlertRelabelConfig, may be
+ // empty.
+ //
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+const (
+ // AlertRelabelConfigReady is the condition type indicating readiness.
+ AlertRelabelConfigReady string = "Ready"
+)
+
+// AlertRelabelConfigList is a list of AlertRelabelConfigs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AlertRelabelConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // items is a list of AlertRelabelConfigs.
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:Required
+ Items []*AlertRelabelConfig `json:"items"`
+}
+
+// LabelName is a valid Prometheus label name which may only contain ASCII
+// letters, numbers, and underscores.
+//
+// +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$"
+// +kubebuilder:validation:MaxLength=2048
+type LabelName string
+
+// RelabelConfig allows dynamic rewriting of label sets for alerts.
+// See Prometheus documentation:
+// - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs
+// - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+//
+// +kubebuilder:validation:XValidation:rule="self.action != 'HashMod' || self.modulus != 0",message="relabel action hashmod requires non-zero modulus"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'Replace' && self.action != 'HashMod') || has(self.targetLabel)",message="targetLabel is required when action is Replace or HashMod"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.sourceLabels)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found sourceLabels)"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.targetLabel)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found targetLabel)"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.modulus)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found modulus)"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.separator)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found separator)"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.replacement)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found replacement)"
+// +kubebuilder:validation:XValidation:rule="!has(self.modulus) || (has(self.modulus) && size(self.sourceLabels) > 0)",message="modulus requires sourceLabels to be present"
+// +kubebuilder:validation:XValidation:rule="(self.action == 'LabelDrop' || self.action == 'LabelKeep') || has(self.sourceLabels)",message="sourceLabels is required for actions Replace, Keep, Drop, HashMod and LabelMap"
+// +kubebuilder:validation:XValidation:rule="(self.action != 'Replace' && self.action != 'LabelMap') || has(self.replacement)",message="replacement is required for actions Replace and LabelMap"
+// +k8s:openapi-gen=true
+type RelabelConfig struct {
+ // sourceLabels select values from existing labels. Their content is
+ // concatenated using the configured separator and matched against the
+ // configured regular expression for the 'Replace', 'Keep', and 'Drop' actions.
+ // Not allowed for actions 'LabelKeep' and 'LabelDrop'.
+ //
+ // +optional
+ SourceLabels []LabelName `json:"sourceLabels,omitempty"`
+
+ // separator placed between concatenated source label values. When omitted,
+ // Prometheus will use its default value of ';'.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=2048
+ Separator string `json:"separator,omitempty"`
+
+ // targetLabel to which the resulting value is written in a 'Replace' action.
+ // It is required for 'Replace' and 'HashMod' actions and forbidden for
+ // actions 'LabelKeep' and 'LabelDrop'. Regex capture groups
+ // are available.
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=2048
+ TargetLabel string `json:"targetLabel,omitempty"`
+
+ // regex against which the extracted value is matched. Default is: '(.*)'
+ // regex is required for all actions except 'HashMod'
+ //
+ // +optional
+ // +kubebuilder:default=(.*)
+ // +kubebuilder:validation:MaxLength=2048
+ Regex string `json:"regex,omitempty"`
+
+ // modulus to take of the hash of the source label values. This can be
+ // combined with the 'HashMod' action to set 'target_label' to the 'modulus'
+ // of a hash of the concatenated 'source_labels'. This is only valid if
+ // sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'.
+ //
+ // +optional
+ Modulus uint64 `json:"modulus,omitempty"`
+
+ // replacement value against which a regex replace is performed if the regular
+ // expression matches. This is required if the action is 'Replace' or
+ // 'LabelMap' and forbidden for actions 'LabelKeep' and 'LabelDrop'.
+ // Regex capture groups are available. Default is: '$1'
+ //
+ // +optional
+ // +kubebuilder:validation:MaxLength=2048
+ Replacement string `json:"replacement,omitempty"`
+
+ // action to perform based on regex matching. Must be one of: 'Replace', 'Keep',
+ // 'Drop', 'HashMod', 'LabelMap', 'LabelDrop', or 'LabelKeep'. Default is: 'Replace'
+ //
+ // +kubebuilder:validation:Enum=Replace;Keep;Drop;HashMod;LabelMap;LabelDrop;LabelKeep
+ // +kubebuilder:default=Replace
+ // +optional
+ Action string `json:"action,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..cb472ccf54
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go
@@ -0,0 +1,314 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertRelabelConfig) DeepCopyInto(out *AlertRelabelConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfig.
+func (in *AlertRelabelConfig) DeepCopy() *AlertRelabelConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertRelabelConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertRelabelConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertRelabelConfigList) DeepCopyInto(out *AlertRelabelConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]*AlertRelabelConfig, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AlertRelabelConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigList.
+func (in *AlertRelabelConfigList) DeepCopy() *AlertRelabelConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertRelabelConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertRelabelConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertRelabelConfigSpec) DeepCopyInto(out *AlertRelabelConfigSpec) {
+ *out = *in
+ if in.Configs != nil {
+ in, out := &in.Configs, &out.Configs
+ *out = make([]RelabelConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigSpec.
+func (in *AlertRelabelConfigSpec) DeepCopy() *AlertRelabelConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertRelabelConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertRelabelConfigStatus) DeepCopyInto(out *AlertRelabelConfigStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigStatus.
+func (in *AlertRelabelConfigStatus) DeepCopy() *AlertRelabelConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertRelabelConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRule) DeepCopyInto(out *AlertingRule) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRule.
+func (in *AlertingRule) DeepCopy() *AlertingRule {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertingRule) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleList) DeepCopyInto(out *AlertingRuleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AlertingRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleList.
+func (in *AlertingRuleList) DeepCopy() *AlertingRuleList {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AlertingRuleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleSpec) DeepCopyInto(out *AlertingRuleSpec) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]RuleGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleSpec.
+func (in *AlertingRuleSpec) DeepCopy() *AlertingRuleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertingRuleStatus) DeepCopyInto(out *AlertingRuleStatus) {
+ *out = *in
+ out.PrometheusRule = in.PrometheusRule
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleStatus.
+func (in *AlertingRuleStatus) DeepCopy() *AlertingRuleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AlertingRuleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrometheusRuleRef) DeepCopyInto(out *PrometheusRuleRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleRef.
+func (in *PrometheusRuleRef) DeepCopy() *PrometheusRuleRef {
+ if in == nil {
+ return nil
+ }
+ out := new(PrometheusRuleRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) {
+ *out = *in
+ if in.SourceLabels != nil {
+ in, out := &in.SourceLabels, &out.SourceLabels
+ *out = make([]LabelName, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig.
+func (in *RelabelConfig) DeepCopy() *RelabelConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RelabelConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rule) DeepCopyInto(out *Rule) {
+ *out = *in
+ out.Expr = in.Expr
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
+ if in == nil {
+ return nil
+ }
+ out := new(Rule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RuleGroup) DeepCopyInto(out *RuleGroup) {
+ *out = *in
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]Rule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup.
+func (in *RuleGroup) DeepCopy() *RuleGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(RuleGroup)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..0efeba4190
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,44 @@
+alertrelabelconfigs.monitoring.openshift.io:
+ Annotations:
+ description: OpenShift Monitoring alert relabel configurations
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1406
+ CRDName: alertrelabelconfigs.monitoring.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: monitoring
+ FilenameOperatorOrdering: "02"
+ FilenameRunLevel: "0000_50"
+ GroupName: monitoring.openshift.io
+ HasStatus: true
+ KindName: AlertRelabelConfig
+ Labels: {}
+ PluralName: alertrelabelconfigs
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+alertingrules.monitoring.openshift.io:
+ Annotations:
+ description: OpenShift Monitoring alerting rules
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1406
+ CRDName: alertingrules.monitoring.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: monitoring
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_50"
+ GroupName: monitoring.openshift.io
+ HasStatus: true
+ KindName: AlertingRule
+ Labels: {}
+ PluralName: alertingrules
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..adb3837720
--- /dev/null
+++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,141 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AlertRelabelConfig = map[string]string{
+ "": "AlertRelabelConfig defines a set of relabel configs for alerts.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec describes the desired state of this AlertRelabelConfig object.",
+ "status": "status describes the current state of this AlertRelabelConfig object.",
+}
+
+func (AlertRelabelConfig) SwaggerDoc() map[string]string {
+ return map_AlertRelabelConfig
+}
+
+var map_AlertRelabelConfigList = map[string]string{
+ "": "AlertRelabelConfigList is a list of AlertRelabelConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of AlertRelabelConfigs.",
+}
+
+func (AlertRelabelConfigList) SwaggerDoc() map[string]string {
+ return map_AlertRelabelConfigList
+}
+
+var map_AlertRelabelConfigSpec = map[string]string{
+ "": "AlertRelabelConfigsSpec is the desired state of an AlertRelabelConfig resource.",
+ "configs": "configs is a list of sequentially evaluated alert relabel configs.",
+}
+
+func (AlertRelabelConfigSpec) SwaggerDoc() map[string]string {
+ return map_AlertRelabelConfigSpec
+}
+
+var map_AlertRelabelConfigStatus = map[string]string{
+ "": "AlertRelabelConfigStatus is the status of an AlertRelabelConfig resource.",
+ "conditions": "conditions contains details on the state of the AlertRelabelConfig, may be empty.",
+}
+
+func (AlertRelabelConfigStatus) SwaggerDoc() map[string]string {
+ return map_AlertRelabelConfigStatus
+}
+
+var map_AlertingRule = map[string]string{
+ "": "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage.\n\nThe API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly.\n\nYou can find upstream API documentation for PrometheusRule resources here:\n\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec describes the desired state of this AlertingRule object.",
+ "status": "status describes the current state of this AlertOverrides object.",
+}
+
+func (AlertingRule) SwaggerDoc() map[string]string {
+ return map_AlertingRule
+}
+
+var map_AlertingRuleList = map[string]string{
+ "": "AlertingRuleList is a list of AlertingRule objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of AlertingRule objects.",
+}
+
+func (AlertingRuleList) SwaggerDoc() map[string]string {
+ return map_AlertingRuleList
+}
+
+var map_AlertingRuleSpec = map[string]string{
+ "": "AlertingRuleSpec is the desired state of an AlertingRule resource.",
+ "groups": "groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed.\n\nIt's common to group related alerting rules into a single AlertingRule resources, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case.",
+}
+
+func (AlertingRuleSpec) SwaggerDoc() map[string]string {
+ return map_AlertingRuleSpec
+}
+
+var map_AlertingRuleStatus = map[string]string{
+ "": "AlertingRuleStatus is the status of an AlertingRule resource.",
+ "observedGeneration": "observedGeneration is the last generation change you've dealt with.",
+ "prometheusRule": "prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace.",
+}
+
+func (AlertingRuleStatus) SwaggerDoc() map[string]string {
+ return map_AlertingRuleStatus
+}
+
+var map_PrometheusRuleRef = map[string]string{
+ "": "PrometheusRuleRef is a reference to an existing PrometheusRule object. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. This is used to point to the generated PrometheusRule object in the AlertingRule status.",
+ "name": "name of the referenced PrometheusRule.",
+}
+
+func (PrometheusRuleRef) SwaggerDoc() map[string]string {
+ return map_PrometheusRuleRef
+}
+
+var map_RelabelConfig = map[string]string{
+ "": "RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config",
+ "sourceLabels": "sourceLabels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the 'Replace', 'Keep', and 'Drop' actions. Not allowed for actions 'LabelKeep' and 'LabelDrop'.",
+ "separator": "separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'.",
+ "targetLabel": "targetLabel to which the resulting value is written in a 'Replace' action. It is required for 'Replace' and 'HashMod' actions and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available.",
+ "regex": "regex against which the extracted value is matched. Default is: '(.*)' regex is required for all actions except 'HashMod'",
+ "modulus": "modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'. This is only valid if sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'.",
+ "replacement": "replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is 'Replace' or 'LabelMap' and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available. Default is: '$1'",
+ "action": "action to perform based on regex matching. Must be one of: 'Replace', 'Keep', 'Drop', 'HashMod', 'LabelMap', 'LabelDrop', or 'LabelKeep'. Default is: 'Replace'",
+}
+
+func (RelabelConfig) SwaggerDoc() map[string]string {
+ return map_RelabelConfig
+}
+
+var map_Rule = map[string]string{
+ "": "Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules",
+ "alert": "alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character.",
+ "expr": "expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.",
+ "for": "for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending.",
+ "labels": "labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity, where one sets `severity: warning` under the `labels` key:",
+ "annotations": "annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links.",
+}
+
+func (Rule) SwaggerDoc() map[string]string {
+ return map_Rule
+}
+
+var map_RuleGroup = map[string]string{
+ "": "RuleGroup is a list of sequentially evaluated alerting rules.",
+ "name": "name is the name of the group.",
+ "interval": "interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval",
+ "rules": "rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed.",
+}
+
+func (RuleGroup) SwaggerDoc() map[string]string {
+ return map_RuleGroup
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/network/.codegen.yaml b/vendor/github.com/openshift/api/network/.codegen.yaml
new file mode 100644
index 0000000000..ab56605cdc
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/.codegen.yaml
@@ -0,0 +1 @@
+schemapatch:
diff --git a/vendor/github.com/openshift/api/network/OWNERS b/vendor/github.com/openshift/api/network/OWNERS
new file mode 100644
index 0000000000..279009f7ae
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+ - danwinship
+ - dcbw
+ - knobunc
diff --git a/vendor/github.com/openshift/api/network/install.go b/vendor/github.com/openshift/api/network/install.go
new file mode 100644
index 0000000000..fbaa079b3f
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/install.go
@@ -0,0 +1,27 @@
+package network
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ networkv1 "github.com/openshift/api/network/v1"
+ networkv1alpha1 "github.com/openshift/api/network/v1alpha1"
+)
+
+const (
+ GroupName = "network.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install, networkv1alpha1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/network/v1/Makefile b/vendor/github.com/openshift/api/network/v1/Makefile
new file mode 100644
index 0000000000..027afff7ca
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/network/v1/constants.go b/vendor/github.com/openshift/api/network/v1/constants.go
new file mode 100644
index 0000000000..54c06f3319
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/constants.go
@@ -0,0 +1,17 @@
+package v1
+
+const (
+ // Pod annotations
+ AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan"
+
+ // HostSubnet annotations. (Note: should be "hostsubnet.network.openshift.io/", but the incorrect name is now part of the API.)
+ AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet"
+ FixedVNIDHostAnnotation = "pod.network.openshift.io/fixed-vnid-host"
+ NodeUIDAnnotation = "pod.network.openshift.io/node-uid"
+
+ // NetNamespace annotations
+ MulticastEnabledAnnotation = "netnamespace.network.openshift.io/multicast-enabled"
+
+ // ChangePodNetworkAnnotation is an annotation on NetNamespace to request change of pod network
+ ChangePodNetworkAnnotation string = "pod.network.openshift.io/multitenant.change-network"
+)
diff --git a/vendor/github.com/openshift/api/network/v1/doc.go b/vendor/github.com/openshift/api/network/v1/doc.go
new file mode 100644
index 0000000000..2816420d96
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/network/apis/network
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=network.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/network/v1/generated.pb.go b/vendor/github.com/openshift/api/network/v1/generated.pb.go
new file mode 100644
index 0000000000..9534e37155
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/generated.pb.go
@@ -0,0 +1,3186 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/network/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ClusterNetwork) Reset() { *m = ClusterNetwork{} }
+func (*ClusterNetwork) ProtoMessage() {}
+func (*ClusterNetwork) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{0}
+}
+func (m *ClusterNetwork) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterNetwork) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterNetwork.Merge(m, src)
+}
+func (m *ClusterNetwork) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterNetwork) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterNetwork.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterNetwork proto.InternalMessageInfo
+
+func (m *ClusterNetworkEntry) Reset() { *m = ClusterNetworkEntry{} }
+func (*ClusterNetworkEntry) ProtoMessage() {}
+func (*ClusterNetworkEntry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{1}
+}
+func (m *ClusterNetworkEntry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterNetworkEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterNetworkEntry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterNetworkEntry.Merge(m, src)
+}
+func (m *ClusterNetworkEntry) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterNetworkEntry) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterNetworkEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterNetworkEntry proto.InternalMessageInfo
+
+func (m *ClusterNetworkList) Reset() { *m = ClusterNetworkList{} }
+func (*ClusterNetworkList) ProtoMessage() {}
+func (*ClusterNetworkList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{2}
+}
+func (m *ClusterNetworkList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterNetworkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterNetworkList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterNetworkList.Merge(m, src)
+}
+func (m *ClusterNetworkList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterNetworkList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterNetworkList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterNetworkList proto.InternalMessageInfo
+
+func (m *EgressNetworkPolicy) Reset() { *m = EgressNetworkPolicy{} }
+func (*EgressNetworkPolicy) ProtoMessage() {}
+func (*EgressNetworkPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{3}
+}
+func (m *EgressNetworkPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressNetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressNetworkPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressNetworkPolicy.Merge(m, src)
+}
+func (m *EgressNetworkPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressNetworkPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressNetworkPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressNetworkPolicy proto.InternalMessageInfo
+
+func (m *EgressNetworkPolicyList) Reset() { *m = EgressNetworkPolicyList{} }
+func (*EgressNetworkPolicyList) ProtoMessage() {}
+func (*EgressNetworkPolicyList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{4}
+}
+func (m *EgressNetworkPolicyList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressNetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressNetworkPolicyList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressNetworkPolicyList.Merge(m, src)
+}
+func (m *EgressNetworkPolicyList) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressNetworkPolicyList) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressNetworkPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressNetworkPolicyList proto.InternalMessageInfo
+
+func (m *EgressNetworkPolicyPeer) Reset() { *m = EgressNetworkPolicyPeer{} }
+func (*EgressNetworkPolicyPeer) ProtoMessage() {}
+func (*EgressNetworkPolicyPeer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{5}
+}
+func (m *EgressNetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressNetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressNetworkPolicyPeer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressNetworkPolicyPeer.Merge(m, src)
+}
+func (m *EgressNetworkPolicyPeer) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressNetworkPolicyPeer) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressNetworkPolicyPeer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressNetworkPolicyPeer proto.InternalMessageInfo
+
+func (m *EgressNetworkPolicyRule) Reset() { *m = EgressNetworkPolicyRule{} }
+func (*EgressNetworkPolicyRule) ProtoMessage() {}
+func (*EgressNetworkPolicyRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{6}
+}
+func (m *EgressNetworkPolicyRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressNetworkPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressNetworkPolicyRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressNetworkPolicyRule.Merge(m, src)
+}
+func (m *EgressNetworkPolicyRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressNetworkPolicyRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressNetworkPolicyRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressNetworkPolicyRule proto.InternalMessageInfo
+
+func (m *EgressNetworkPolicySpec) Reset() { *m = EgressNetworkPolicySpec{} }
+func (*EgressNetworkPolicySpec) ProtoMessage() {}
+func (*EgressNetworkPolicySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{7}
+}
+func (m *EgressNetworkPolicySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressNetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressNetworkPolicySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressNetworkPolicySpec.Merge(m, src)
+}
+func (m *EgressNetworkPolicySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressNetworkPolicySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressNetworkPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressNetworkPolicySpec proto.InternalMessageInfo
+
+func (m *HostSubnet) Reset() { *m = HostSubnet{} }
+func (*HostSubnet) ProtoMessage() {}
+func (*HostSubnet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{8}
+}
+func (m *HostSubnet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HostSubnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HostSubnet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HostSubnet.Merge(m, src)
+}
+func (m *HostSubnet) XXX_Size() int {
+ return m.Size()
+}
+func (m *HostSubnet) XXX_DiscardUnknown() {
+ xxx_messageInfo_HostSubnet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostSubnet proto.InternalMessageInfo
+
+func (m *HostSubnetList) Reset() { *m = HostSubnetList{} }
+func (*HostSubnetList) ProtoMessage() {}
+func (*HostSubnetList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{9}
+}
+func (m *HostSubnetList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HostSubnetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HostSubnetList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HostSubnetList.Merge(m, src)
+}
+func (m *HostSubnetList) XXX_Size() int {
+ return m.Size()
+}
+func (m *HostSubnetList) XXX_DiscardUnknown() {
+ xxx_messageInfo_HostSubnetList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostSubnetList proto.InternalMessageInfo
+
+func (m *NetNamespace) Reset() { *m = NetNamespace{} }
+func (*NetNamespace) ProtoMessage() {}
+func (*NetNamespace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{10}
+}
+func (m *NetNamespace) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetNamespace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetNamespace.Merge(m, src)
+}
+func (m *NetNamespace) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetNamespace) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetNamespace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetNamespace proto.InternalMessageInfo
+
+func (m *NetNamespaceList) Reset() { *m = NetNamespaceList{} }
+func (*NetNamespaceList) ProtoMessage() {}
+func (*NetNamespaceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_38d1cb27735fa5d9, []int{11}
+}
+func (m *NetNamespaceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetNamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetNamespaceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetNamespaceList.Merge(m, src)
+}
+func (m *NetNamespaceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetNamespaceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetNamespaceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetNamespaceList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ClusterNetwork)(nil), "github.com.openshift.api.network.v1.ClusterNetwork")
+ proto.RegisterType((*ClusterNetworkEntry)(nil), "github.com.openshift.api.network.v1.ClusterNetworkEntry")
+ proto.RegisterType((*ClusterNetworkList)(nil), "github.com.openshift.api.network.v1.ClusterNetworkList")
+ proto.RegisterType((*EgressNetworkPolicy)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicy")
+ proto.RegisterType((*EgressNetworkPolicyList)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyList")
+ proto.RegisterType((*EgressNetworkPolicyPeer)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyPeer")
+ proto.RegisterType((*EgressNetworkPolicyRule)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyRule")
+ proto.RegisterType((*EgressNetworkPolicySpec)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicySpec")
+ proto.RegisterType((*HostSubnet)(nil), "github.com.openshift.api.network.v1.HostSubnet")
+ proto.RegisterType((*HostSubnetList)(nil), "github.com.openshift.api.network.v1.HostSubnetList")
+ proto.RegisterType((*NetNamespace)(nil), "github.com.openshift.api.network.v1.NetNamespace")
+ proto.RegisterType((*NetNamespaceList)(nil), "github.com.openshift.api.network.v1.NetNamespaceList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/network/v1/generated.proto", fileDescriptor_38d1cb27735fa5d9)
+}
+
+var fileDescriptor_38d1cb27735fa5d9 = []byte{
+ // 996 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
+ 0x14, 0xaf, 0xf3, 0xa7, 0x6d, 0x26, 0x6d, 0x5a, 0xcd, 0x56, 0xac, 0x29, 0x92, 0x13, 0xb9, 0x02,
+ 0x82, 0x56, 0xd8, 0xb4, 0x8b, 0x50, 0x0f, 0x08, 0xb4, 0x6e, 0x2b, 0x6d, 0xa4, 0x6e, 0x88, 0x26,
+ 0x65, 0x55, 0x21, 0x40, 0xb8, 0xce, 0xac, 0x63, 0x9a, 0xd8, 0x96, 0x67, 0x12, 0x88, 0x10, 0x7f,
+ 0x2e, 0xdc, 0xf9, 0x00, 0x7c, 0x0c, 0x3e, 0x02, 0x87, 0x1e, 0x38, 0xec, 0x09, 0xf6, 0x14, 0x51,
+ 0x73, 0xe7, 0x03, 0xf4, 0x84, 0x66, 0x3c, 0x8e, 0xed, 0xac, 0x2b, 0xa2, 0x22, 0x72, 0x4a, 0xe6,
+ 0xfd, 0xde, 0xdf, 0xf9, 0xbd, 0xf7, 0xc6, 0xe0, 0xa1, 0xed, 0xd0, 0xfe, 0xe8, 0x42, 0xb3, 0xbc,
+ 0xa1, 0xee, 0xf9, 0xd8, 0x25, 0x7d, 0xe7, 0x19, 0xd5, 0x4d, 0xdf, 0xd1, 0x5d, 0x4c, 0xbf, 0xf2,
+ 0x82, 0x4b, 0x7d, 0xbc, 0xaf, 0xdb, 0xd8, 0xc5, 0x81, 0x49, 0x71, 0x4f, 0xf3, 0x03, 0x8f, 0x7a,
+ 0x70, 0x2f, 0x31, 0xd2, 0x66, 0x46, 0x9a, 0xe9, 0x3b, 0x9a, 0x30, 0xd2, 0xc6, 0xfb, 0xbb, 0x6f,
+ 0xa7, 0x3c, 0xdb, 0x9e, 0xed, 0xe9, 0xdc, 0xf6, 0x62, 0xf4, 0x8c, 0x9f, 0xf8, 0x81, 0xff, 0x8b,
+ 0x7c, 0xee, 0xbe, 0x7b, 0x79, 0x48, 0x34, 0xc7, 0x63, 0xa1, 0x87, 0xa6, 0xd5, 0x77, 0x5c, 0x1c,
+ 0x4c, 0x74, 0xff, 0xd2, 0x66, 0x02, 0xa2, 0x0f, 0x31, 0x35, 0x73, 0x32, 0xd9, 0x7d, 0xef, 0x36,
+ 0xab, 0x60, 0xe4, 0x52, 0x67, 0x88, 0x75, 0x62, 0xf5, 0xf1, 0xd0, 0x9c, 0xb7, 0x53, 0x7f, 0x2e,
+ 0x81, 0xda, 0xd1, 0x60, 0x44, 0x28, 0x0e, 0xda, 0x51, 0xca, 0xf0, 0x0b, 0xb0, 0xce, 0xa2, 0xf4,
+ 0x4c, 0x6a, 0xca, 0x52, 0x43, 0x6a, 0x56, 0x0f, 0xde, 0xd1, 0x22, 0xef, 0x5a, 0xda, 0xbb, 0xe6,
+ 0x5f, 0xda, 0x4c, 0x40, 0x34, 0xa6, 0xad, 0x8d, 0xf7, 0xb5, 0x8f, 0x2e, 0xbe, 0xc4, 0x16, 0x7d,
+ 0x82, 0xa9, 0x69, 0xc0, 0xab, 0x69, 0x7d, 0x25, 0x9c, 0xd6, 0x41, 0x22, 0x43, 0x33, 0xaf, 0xf0,
+ 0x2d, 0xb0, 0x26, 0xee, 0x47, 0x2e, 0x34, 0xa4, 0x66, 0xc5, 0xd8, 0x12, 0xea, 0x6b, 0x22, 0x07,
+ 0x14, 0xe3, 0xf0, 0x18, 0x6c, 0xf7, 0x3d, 0x42, 0xc9, 0xe8, 0xc2, 0xc5, 0x74, 0x80, 0x5d, 0x9b,
+ 0xf6, 0xe5, 0x62, 0x43, 0x6a, 0x6e, 0x1a, 0xb2, 0xb0, 0xd9, 0x7e, 0xec, 0x11, 0xda, 0xe5, 0xf8,
+ 0x29, 0xc7, 0xd1, 0x4b, 0x16, 0xf0, 0x03, 0x50, 0x23, 0x38, 0x18, 0x3b, 0x16, 0x16, 0x01, 0xe4,
+ 0x12, 0x8f, 0xfb, 0x8a, 0xf0, 0x51, 0xeb, 0x66, 0x50, 0x34, 0xa7, 0x0d, 0x0f, 0x00, 0xf0, 0x07,
+ 0x23, 0xdb, 0x71, 0xdb, 0xe6, 0x10, 0xcb, 0x65, 0x6e, 0x3b, 0x2b, 0xb1, 0x33, 0x43, 0x50, 0x4a,
+ 0x0b, 0x7e, 0x03, 0xb6, 0xac, 0xcc, 0xc5, 0x12, 0x79, 0xb5, 0x51, 0x6c, 0x56, 0x0f, 0x0e, 0xb5,
+ 0x05, 0xba, 0x46, 0xcb, 0x92, 0x72, 0xe2, 0xd2, 0x60, 0x62, 0xdc, 0x17, 0x21, 0xb7, 0xb2, 0x20,
+ 0x41, 0xf3, 0x91, 0xe0, 0x03, 0x50, 0x19, 0x7f, 0x3d, 0x30, 0xdd, 0x8e, 0x17, 0x50, 0x79, 0x8d,
+ 0xdf, 0xd7, 0x66, 0x38, 0xad, 0x57, 0x9e, 0x9e, 0x9f, 0x3e, 0x6a, 0x33, 0x21, 0x4a, 0x70, 0xf8,
+ 0x2a, 0x28, 0x0e, 0xe9, 0x48, 0x5e, 0xe7, 0x6a, 0x6b, 0xe1, 0xb4, 0x5e, 0x7c, 0x72, 0xf6, 0x31,
+ 0x62, 0x32, 0xf5, 0x5b, 0x70, 0x2f, 0x27, 0x11, 0xd8, 0x00, 0x25, 0xcb, 0xe9, 0x05, 0xbc, 0x3d,
+ 0x2a, 0xc6, 0x86, 0x48, 0xab, 0x74, 0xd4, 0x3a, 0x46, 0x88, 0x23, 0x31, 0x6f, 0x69, 0x5e, 0x38,
+ 0xd7, 0xff, 0xca, 0x5b, 0x5a, 0xa2, 0xfe, 0x26, 0x01, 0x98, 0x8d, 0x7f, 0xea, 0x10, 0x0a, 0x3f,
+ 0x7d, 0xa9, 0x43, 0xb5, 0xc5, 0x3a, 0x94, 0x59, 0xf3, 0xfe, 0xdc, 0x16, 0x49, 0xac, 0xc7, 0x92,
+ 0x54, 0x77, 0x9e, 0x83, 0xb2, 0x43, 0xf1, 0x90, 0xc8, 0x05, 0x4e, 0xd7, 0xc3, 0x3b, 0xd0, 0x65,
+ 0x6c, 0x0a, 0xff, 0xe5, 0x16, 0xf3, 0x84, 0x22, 0x87, 0xea, 0x1f, 0x12, 0xb8, 0x77, 0x62, 0x07,
+ 0x98, 0x10, 0xa1, 0xd7, 0xf1, 0x06, 0x8e, 0x35, 0x59, 0xc2, 0xc4, 0x7d, 0x0e, 0x4a, 0xc4, 0xc7,
+ 0x16, 0xa7, 0xa0, 0x7a, 0xf0, 0xfe, 0x42, 0x25, 0xe5, 0x64, 0xda, 0xf5, 0xb1, 0x95, 0xd0, 0xcd,
+ 0x4e, 0x88, 0xfb, 0x55, 0x7f, 0x97, 0xc0, 0xfd, 0x1c, 0xfd, 0x25, 0xb0, 0xf5, 0x59, 0x96, 0xad,
+ 0xc3, 0xbb, 0x96, 0x76, 0x0b, 0x65, 0xdf, 0xe5, 0xd6, 0xd5, 0xc1, 0x38, 0x80, 0x87, 0x60, 0x83,
+ 0xb5, 0x7a, 0x17, 0x0f, 0xb0, 0x45, 0xbd, 0x78, 0x18, 0x76, 0x84, 0x9b, 0x0d, 0x36, 0x0c, 0x31,
+ 0x86, 0x32, 0x9a, 0x6c, 0xff, 0xf5, 0x5c, 0xc2, 0x77, 0xc9, 0xdc, 0xfe, 0x3b, 0x6e, 0x77, 0xf9,
+ 0x22, 0x89, 0x71, 0xf5, 0x97, 0xfc, 0x8b, 0x45, 0xa3, 0x01, 0x86, 0x1f, 0x82, 0x12, 0x9d, 0xf8,
+ 0x58, 0x04, 0x7e, 0x10, 0xd3, 0x72, 0x36, 0xf1, 0xf1, 0xcd, 0xb4, 0xfe, 0xda, 0x2d, 0x66, 0x0c,
+ 0x46, 0xdc, 0x10, 0x9e, 0x83, 0x02, 0xf5, 0xfe, 0x6b, 0x4f, 0xb0, 0xbb, 0x30, 0x80, 0x08, 0x5e,
+ 0x38, 0xf3, 0x50, 0x81, 0x7a, 0xea, 0xf7, 0xb9, 0x59, 0xb3, 0x86, 0x81, 0x3d, 0xb0, 0x8a, 0x39,
+ 0x24, 0x4b, 0x9c, 0xb1, 0x3b, 0x07, 0x66, 0xc5, 0x18, 0x35, 0x11, 0x78, 0x35, 0x52, 0x40, 0xc2,
+ 0xb7, 0xfa, 0x77, 0x01, 0x80, 0x64, 0xc1, 0x2c, 0x61, 0xc2, 0x1a, 0xa0, 0xc4, 0xd6, 0x97, 0x20,
+ 0x74, 0x36, 0x23, 0x2c, 0x07, 0xc4, 0x11, 0xf8, 0x06, 0x58, 0x65, 0xbf, 0xad, 0x0e, 0x7f, 0xc0,
+ 0x2a, 0x49, 0xea, 0x8f, 0xb9, 0x14, 0x09, 0x94, 0xe9, 0x45, 0x8f, 0x97, 0x78, 0xa4, 0x66, 0x7a,
+ 0x51, 0x2d, 0x48, 0xa0, 0xf0, 0x11, 0xa8, 0x44, 0xc5, 0xb6, 0x3a, 0x44, 0x2e, 0x37, 0x8a, 0xcd,
+ 0x8a, 0xb1, 0xc7, 0x76, 0xfc, 0x49, 0x2c, 0xbc, 0x99, 0xd6, 0x61, 0x72, 0x07, 0xb1, 0x18, 0x25,
+ 0x56, 0xb0, 0x05, 0xaa, 0xd1, 0x81, 0x35, 0x6b, 0xf4, 0x3e, 0x55, 0x8c, 0x37, 0xc3, 0x69, 0xbd,
+ 0x7a, 0x92, 0x88, 0x6f, 0xa6, 0xf5, 0x9d, 0x79, 0x37, 0x7c, 0xd3, 0xa7, 0x6d, 0xd5, 0x5f, 0x25,
+ 0x50, 0x4b, 0x6d, 0xf4, 0xff, 0x7f, 0xf0, 0xcf, 0xb2, 0x83, 0xaf, 0x2f, 0xd4, 0x46, 0x49, 0x86,
+ 0xb7, 0xcc, 0xfb, 0x8f, 0x05, 0xb0, 0xd1, 0xc6, 0x94, 0xcd, 0x1e, 0xf1, 0x4d, 0x0b, 0x2f, 0xed,
+ 0x6b, 0xc8, 0xcd, 0xd9, 0x06, 0x22, 0x11, 0x14, 0xe3, 0x70, 0x0f, 0x94, 0x5d, 0x4c, 0x9d, 0x9e,
+ 0xf8, 0x04, 0x9a, 0x95, 0xd0, 0xc6, 0xb4, 0x75, 0x8c, 0x22, 0x0c, 0x1e, 0xa5, 0xfb, 0xa2, 0xc4,
+ 0x29, 0x7d, 0x7d, 0xbe, 0x2f, 0x76, 0xd2, 0x35, 0xe6, 0x74, 0x86, 0x7a, 0x25, 0x81, 0xed, 0xb4,
+ 0xce, 0x12, 0x08, 0x7d, 0x9a, 0x25, 0x74, 0x7f, 0x21, 0x42, 0xd3, 0x39, 0xe6, 0x53, 0x6a, 0xb4,
+ 0xae, 0xae, 0x95, 0x95, 0xe7, 0xd7, 0xca, 0xca, 0x8b, 0x6b, 0x65, 0xe5, 0x87, 0x50, 0x91, 0xae,
+ 0x42, 0x45, 0x7a, 0x1e, 0x2a, 0xd2, 0x8b, 0x50, 0x91, 0xfe, 0x0c, 0x15, 0xe9, 0xa7, 0xbf, 0x94,
+ 0x95, 0x4f, 0xf6, 0x16, 0xf8, 0xfe, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x4d, 0xd5, 0x11,
+ 0x25, 0x0c, 0x00, 0x00,
+}
+
+func (m *ClusterNetwork) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterNetwork) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterNetwork) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MTU != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MTU))
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.VXLANPort != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.VXLANPort))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.ClusterNetworks) > 0 {
+ for iNdEx := len(m.ClusterNetworks) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClusterNetworks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ i -= len(m.PluginName)
+ copy(dAtA[i:], m.PluginName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PluginName)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.ServiceNetwork)
+ copy(dAtA[i:], m.ServiceNetwork)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceNetwork)))
+ i--
+ dAtA[i] = 0x22
+ i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.Network)
+ copy(dAtA[i:], m.Network)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Network)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterNetworkEntry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterNetworkEntry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterNetworkEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.CIDR)
+ copy(dAtA[i:], m.CIDR)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterNetworkList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterNetworkList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterNetworkList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressNetworkPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressNetworkPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressNetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressNetworkPolicyList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressNetworkPolicyList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressNetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressNetworkPolicyPeer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressNetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressNetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.DNSName)
+ copy(dAtA[i:], m.DNSName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.CIDRSelector)
+ copy(dAtA[i:], m.CIDRSelector)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRSelector)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressNetworkPolicyRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressNetworkPolicyRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressNetworkPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressNetworkPolicySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressNetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressNetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Egress) > 0 {
+ for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HostSubnet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostSubnet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostSubnet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.EgressCIDRs) > 0 {
+ for iNdEx := len(m.EgressCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EgressCIDRs[iNdEx])
+ copy(dAtA[i:], m.EgressCIDRs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressCIDRs[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.EgressIPs) > 0 {
+ for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EgressIPs[iNdEx])
+ copy(dAtA[i:], m.EgressIPs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ i -= len(m.Subnet)
+ copy(dAtA[i:], m.Subnet)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subnet)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.HostIP)
+ copy(dAtA[i:], m.HostIP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Host)
+ copy(dAtA[i:], m.Host)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *HostSubnetList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostSubnetList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostSubnetList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NetNamespace) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetNamespace) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.EgressIPs) > 0 {
+ for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.EgressIPs[iNdEx])
+ copy(dAtA[i:], m.EgressIPs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.NetID))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.NetName)
+ copy(dAtA[i:], m.NetName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NetName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NetNamespaceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetNamespaceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetNamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ClusterNetwork) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Network)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.HostSubnetLength))
+ l = len(m.ServiceNetwork)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PluginName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ClusterNetworks) > 0 {
+ for _, e := range m.ClusterNetworks {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.VXLANPort != nil {
+ n += 1 + sovGenerated(uint64(*m.VXLANPort))
+ }
+ if m.MTU != nil {
+ n += 1 + sovGenerated(uint64(*m.MTU))
+ }
+ return n
+}
+
+func (m *ClusterNetworkEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CIDR)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.HostSubnetLength))
+ return n
+}
+
+func (m *ClusterNetworkList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EgressNetworkPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressNetworkPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EgressNetworkPolicyPeer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CIDRSelector)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DNSName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressNetworkPolicyRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.To.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressNetworkPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Egress) > 0 {
+ for _, e := range m.Egress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HostSubnet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Host)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.HostIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Subnet)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.EgressIPs) > 0 {
+ for _, s := range m.EgressIPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.EgressCIDRs) > 0 {
+ for _, s := range m.EgressCIDRs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HostSubnetList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetNamespace) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NetName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.NetID))
+ if len(m.EgressIPs) > 0 {
+ for _, s := range m.EgressIPs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetNamespaceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ClusterNetwork) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForClusterNetworks := "[]ClusterNetworkEntry{"
+ for _, f := range this.ClusterNetworks {
+ repeatedStringForClusterNetworks += strings.Replace(strings.Replace(f.String(), "ClusterNetworkEntry", "ClusterNetworkEntry", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForClusterNetworks += "}"
+ s := strings.Join([]string{`&ClusterNetwork{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Network:` + fmt.Sprintf("%v", this.Network) + `,`,
+ `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`,
+ `ServiceNetwork:` + fmt.Sprintf("%v", this.ServiceNetwork) + `,`,
+ `PluginName:` + fmt.Sprintf("%v", this.PluginName) + `,`,
+ `ClusterNetworks:` + repeatedStringForClusterNetworks + `,`,
+ `VXLANPort:` + valueToStringGenerated(this.VXLANPort) + `,`,
+ `MTU:` + valueToStringGenerated(this.MTU) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterNetworkEntry) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterNetworkEntry{`,
+ `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`,
+ `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterNetworkList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterNetwork{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterNetwork", "ClusterNetwork", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterNetworkList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressNetworkPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressNetworkPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressNetworkPolicySpec", "EgressNetworkPolicySpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressNetworkPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]EgressNetworkPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicy", "EgressNetworkPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&EgressNetworkPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressNetworkPolicyPeer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressNetworkPolicyPeer{`,
+ `CIDRSelector:` + fmt.Sprintf("%v", this.CIDRSelector) + `,`,
+ `DNSName:` + fmt.Sprintf("%v", this.DNSName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressNetworkPolicyRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressNetworkPolicyRule{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `To:` + strings.Replace(strings.Replace(this.To.String(), "EgressNetworkPolicyPeer", "EgressNetworkPolicyPeer", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressNetworkPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEgress := "[]EgressNetworkPolicyRule{"
+ for _, f := range this.Egress {
+ repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicyRule", "EgressNetworkPolicyRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEgress += "}"
+ s := strings.Join([]string{`&EgressNetworkPolicySpec{`,
+ `Egress:` + repeatedStringForEgress + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HostSubnet) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HostSubnet{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Host:` + fmt.Sprintf("%v", this.Host) + `,`,
+ `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`,
+ `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`,
+ `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`,
+ `EgressCIDRs:` + fmt.Sprintf("%v", this.EgressCIDRs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HostSubnetList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]HostSubnet{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HostSubnet", "HostSubnet", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&HostSubnetList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetNamespace) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetNamespace{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `NetName:` + fmt.Sprintf("%v", this.NetName) + `,`,
+ `NetID:` + fmt.Sprintf("%v", this.NetID) + `,`,
+ `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetNamespaceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]NetNamespace{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetNamespace", "NetNamespace", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&NetNamespaceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ClusterNetwork) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterNetwork: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterNetwork: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Network = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType)
+ }
+ m.HostSubnetLength = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HostSubnetLength |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceNetwork", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceNetwork = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PluginName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PluginName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterNetworks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClusterNetworks = append(m.ClusterNetworks, ClusterNetworkEntry{})
+ if err := m.ClusterNetworks[len(m.ClusterNetworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VXLANPort", wireType)
+ }
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.VXLANPort = &v
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MTU", wireType)
+ }
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MTU = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterNetworkEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterNetworkEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterNetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CIDR = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType)
+ }
+ m.HostSubnetLength = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HostSubnetLength |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterNetworkList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterNetworkList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterNetworkList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterNetwork{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressNetworkPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressNetworkPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressNetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressNetworkPolicyList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressNetworkPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressNetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, EgressNetworkPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressNetworkPolicyPeer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressNetworkPolicyPeer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressNetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CIDRSelector", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CIDRSelector = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DNSName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DNSName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressNetworkPolicyRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressNetworkPolicyRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressNetworkPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = EgressNetworkPolicyRuleType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressNetworkPolicySpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressNetworkPolicySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressNetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Egress = append(m.Egress, EgressNetworkPolicyRule{})
+ if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HostSubnet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostSubnet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostSubnet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Host = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HostIP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subnet = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EgressIPs = append(m.EgressIPs, HostSubnetEgressIP(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EgressCIDRs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EgressCIDRs = append(m.EgressCIDRs, HostSubnetEgressCIDR(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HostSubnetList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostSubnetList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostSubnetList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, HostSubnet{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetNamespace) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetNamespace: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetNamespace: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NetName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetID", wireType)
+ }
+ m.NetID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NetID |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.EgressIPs = append(m.EgressIPs, NetNamespaceEgressIP(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetNamespaceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetNamespaceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetNamespaceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, NetNamespace{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto
new file mode 100644
index 0000000000..b7016bfb25
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/generated.proto
@@ -0,0 +1,258 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.network.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/network/v1";
+
+// ClusterNetwork describes the cluster network. There is normally only one object of this type,
+// named "default", which is created by the SDN network plugin based on the master configuration
+// when the cluster is brought up for the first time.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusternetworks,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=001
+// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=.network,description="The primary cluster network CIDR"
+// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=.serviceNetwork,description="The service network CIDR"
+// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=.pluginName,description="The OpenShift SDN network plug-in in use"
+// +openshift:compatibility-gen:level=1
+message ClusterNetwork {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Network is a CIDR string specifying the global overlay network's L3 space
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ optional string network = 2;
+
+ // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=30
+ optional uint32 hostsubnetlength = 3;
+
+ // ServiceNetwork is the CIDR range that Service IP addresses are allocated from
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ optional string serviceNetwork = 4;
+
+ // PluginName is the name of the network plugin being used
+ optional string pluginName = 5;
+
+ // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.
+ repeated ClusterNetworkEntry clusterNetworks = 6;
+
+ // VXLANPort sets the VXLAN destination port used by the cluster.
+ // It is set by the master configuration file on startup and cannot be edited manually.
+ // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789.
+ // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Optional
+ // +optional
+ optional uint32 vxlanPort = 7;
+
+ // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.
+ // +kubebuilder:validation:Minimum=576
+ // +kubebuilder:validation:Maximum=65536
+ // +kubebuilder:validation:Optional
+ // +optional
+ optional uint32 mtu = 8;
+}
+
+// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
+message ClusterNetworkEntry {
+ // CIDR defines the total range of a cluster networks address space.
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ optional string cidr = 1;
+
+ // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=30
+ optional uint32 hostSubnetLength = 2;
+}
+
+// ClusterNetworkList is a collection of ClusterNetworks
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterNetworkList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of cluster networks
+ repeated ClusterNetwork items = 2;
+}
+
+// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using
+// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address
+// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's
+// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy
+// is present) then the traffic will be allowed by default.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=egressnetworkpolicies,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=004
+// +openshift:compatibility-gen:level=1
+message EgressNetworkPolicy {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the specification of the current egress network policy
+ optional EgressNetworkPolicySpec spec = 2;
+}
+
+// EgressNetworkPolicyList is a collection of EgressNetworkPolicy
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message EgressNetworkPolicyList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of policies
+ repeated EgressNetworkPolicy items = 2;
+}
+
+// EgressNetworkPolicyPeer specifies a target to apply egress network policy to
+message EgressNetworkPolicyPeer {
+ // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset
+ // Ideally we would have liked to use the cidr openapi format for this property.
+ // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs
+ // We are therefore using a regex pattern to validate instead.
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ optional string cidrSelector = 1;
+
+ // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset
+ // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$`
+ optional string dnsName = 2;
+}
+
+// EgressNetworkPolicyRule contains a single egress network policy rule
+message EgressNetworkPolicyRule {
+ // type marks this as an "Allow" or "Deny" rule
+ optional string type = 1;
+
+ // to is the target that traffic is allowed/denied to
+ optional EgressNetworkPolicyPeer to = 2;
+}
+
+// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic
+message EgressNetworkPolicySpec {
+ // egress contains the list of egress policy rules
+ repeated EgressNetworkPolicyRule egress = 1;
+}
+
+// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the
+// same name as the Node object it corresponds to.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=hostsubnets,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=002
+// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=.host,description="The name of the node"
+// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=.hostIP,description="The IP address to be used as a VTEP by other nodes in the overlay network"
+// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=.subnet,description="The CIDR range of the overlay network assigned to the node for its pods"
+// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=.egressCIDRs,description="The network egress CIDRs"
+// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses"
+// +openshift:compatibility-gen:level=1
+message HostSubnet {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Host is the name of the node. (This is the same as the object's name, but both fields must be set.)
+ // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
+ optional string host = 2;
+
+ // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
+ optional string hostIP = 3;
+
+ // Subnet is the CIDR range of the overlay network assigned to the node for its pods
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ optional string subnet = 4;
+
+ // EgressIPs is the list of automatic egress IP addresses currently hosted by this node.
+ // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the
+ // master will overwrite the value here with its own allocation of egress IPs.
+ // +optional
+ repeated string egressIPs = 5;
+
+ // EgressCIDRs is the list of CIDR ranges available for automatically assigning
+ // egress IPs to this node from. If this field is set then EgressIPs should be
+ // treated as read-only.
+ // +optional
+ repeated string egressCIDRs = 6;
+}
+
+// HostSubnetList is a collection of HostSubnets
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message HostSubnetList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of host subnets
+ repeated HostSubnet items = 2;
+}
+
+// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant
+// plugin, every Namespace will have a corresponding NetNamespace object with the same name.
+// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=netnamespaces,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=003
+// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=.netid,description="The network identifier of the network namespace"
+// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses"
+// +openshift:compatibility-gen:level=1
+message NetNamespace {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)
+ // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
+ optional string netname = 2;
+
+ // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=16777215
+ optional uint32 netid = 3;
+
+ // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace.
+ // (If empty, external traffic will be masqueraded to Node IPs.)
+ // +optional
+ repeated string egressIPs = 4;
+}
+
+// NetNamespaceList is a collection of NetNamespaces
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message NetNamespaceList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of net namespaces
+ repeated NetNamespace items = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/network/v1/legacy.go b/vendor/github.com/openshift/api/network/v1/legacy.go
new file mode 100644
index 0000000000..4395ebf8e5
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/legacy.go
@@ -0,0 +1,27 @@
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &ClusterNetwork{},
+ &ClusterNetworkList{},
+ &HostSubnet{},
+ &HostSubnetList{},
+ &NetNamespace{},
+ &NetNamespaceList{},
+ &EgressNetworkPolicy{},
+ &EgressNetworkPolicyList{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/network/v1/register.go b/vendor/github.com/openshift/api/network/v1/register.go
new file mode 100644
index 0000000000..80defa7642
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/register.go
@@ -0,0 +1,44 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "network.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &ClusterNetwork{},
+ &ClusterNetworkList{},
+ &HostSubnet{},
+ &HostSubnetList{},
+ &NetNamespace{},
+ &NetNamespaceList{},
+ &EgressNetworkPolicy{},
+ &EgressNetworkPolicyList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go
new file mode 100644
index 0000000000..f39e786c20
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/types.go
@@ -0,0 +1,315 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ ClusterNetworkDefault = "default"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterNetwork describes the cluster network. There is normally only one object of this type,
+// named "default", which is created by the SDN network plugin based on the master configuration
+// when the cluster is brought up for the first time.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusternetworks,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=001
+// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=.network,description="The primary cluster network CIDR"
+// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=.serviceNetwork,description="The service network CIDR"
+// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=.pluginName,description="The OpenShift SDN network plug-in in use"
+// +openshift:compatibility-gen:level=1
+type ClusterNetwork struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Network is a CIDR string specifying the global overlay network's L3 space
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"`
+
+ // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=30
+ HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"`
+
+ // ServiceNetwork is the CIDR range that Service IP addresses are allocated from
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"`
+
+ // PluginName is the name of the network plugin being used
+ PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"`
+
+ // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.
+ ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"`
+
+ // VXLANPort sets the VXLAN destination port used by the cluster.
+ // It is set by the master configuration file on startup and cannot be edited manually.
+ // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789.
+ // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Optional
+ // +optional
+ VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"`
+
+ // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.
+ // +kubebuilder:validation:Minimum=576
+ // +kubebuilder:validation:Maximum=65536
+ // +kubebuilder:validation:Optional
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"`
+}
+
+// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
+type ClusterNetworkEntry struct {
+ // CIDR defines the total range of a cluster networks address space.
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"`
+
+ // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.
+ // +kubebuilder:validation:Minimum=2
+ // +kubebuilder:validation:Maximum=30
+ HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterNetworkList is a collection of ClusterNetworks
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterNetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of cluster networks
+ Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by
+// HostSubnet
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
+type HostSubnetEgressIP string
+
+// HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node
+// represented by the HostSubnet
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+type HostSubnetEgressCIDR string
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the
+// same name as the Node object it corresponds to.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=hostsubnets,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=002
+// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=.host,description="The name of the node"
+// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=.hostIP,description="The IP address to be used as a VTEP by other nodes in the overlay network"
+// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=.subnet,description="The CIDR range of the overlay network assigned to the node for its pods"
+// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=.egressCIDRs,description="The network egress CIDRs"
+// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses"
+// +openshift:compatibility-gen:level=1
+type HostSubnet struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Host is the name of the node. (This is the same as the object's name, but both fields must be set.)
+ // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
+ Host string `json:"host" protobuf:"bytes,2,opt,name=host"`
+
+ // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
+ HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"`
+
+ // Subnet is the CIDR range of the overlay network assigned to the node for its pods
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"`
+
+ // EgressIPs is the list of automatic egress IP addresses currently hosted by this node.
+ // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the
+ // master will overwrite the value here with its own allocation of egress IPs.
+ // +optional
+ EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"`
+
+ // EgressCIDRs is the list of CIDR ranges available for automatically assigning
+ // egress IPs to this node from. If this field is set then EgressIPs should be
+ // treated as read-only.
+ // +optional
+ EgressCIDRs []HostSubnetEgressCIDR `json:"egressCIDRs,omitempty" protobuf:"bytes,6,rep,name=egressCIDRs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HostSubnetList is a collection of HostSubnets
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type HostSubnetList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of host subnets
+ Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming
+// from pods in this namespace
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$`
+type NetNamespaceEgressIP string
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant
+// plugin, every Namespace will have a corresponding NetNamespace object with the same name.
+// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=netnamespaces,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=003
+// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=.netid,description="The network identifier of the network namespace"
+// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=.egressIPs,description="The network egress IP addresses"
+// +openshift:compatibility-gen:level=1
+type NetNamespace struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)
+ // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$`
+ NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"`
+
+ // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=16777215
+ NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"`
+
+ // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace.
+ // (If empty, external traffic will be masqueraded to Node IPs.)
+ // +optional
+ EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetNamespaceList is a collection of NetNamespaces
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NetNamespaceList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of net namespaces
+ Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// EgressNetworkPolicyRuleType indicates whether an EgressNetworkPolicyRule allows or denies traffic
+// +kubebuilder:validation:Pattern=`^Allow|Deny$`
+type EgressNetworkPolicyRuleType string
+
+const (
+ EgressNetworkPolicyRuleAllow EgressNetworkPolicyRuleType = "Allow"
+ EgressNetworkPolicyRuleDeny EgressNetworkPolicyRuleType = "Deny"
+)
+
+// EgressNetworkPolicyPeer specifies a target to apply egress network policy to
+type EgressNetworkPolicyPeer struct {
+ // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset
+ // Ideally we would have liked to use the cidr openapi format for this property.
+ // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs
+ // We are therefore using a regex pattern to validate instead.
+ // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$`
+ CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"`
+ // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset
+ // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$`
+ DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"`
+}
+
+// EgressNetworkPolicyRule contains a single egress network policy rule
+type EgressNetworkPolicyRule struct {
+ // type marks this as an "Allow" or "Deny" rule
+ Type EgressNetworkPolicyRuleType `json:"type" protobuf:"bytes,1,rep,name=type"`
+ // to is the target that traffic is allowed/denied to
+ To EgressNetworkPolicyPeer `json:"to" protobuf:"bytes,2,rep,name=to"`
+}
+
+// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic
+type EgressNetworkPolicySpec struct {
+ // egress contains the list of egress policy rules
+ Egress []EgressNetworkPolicyRule `json:"egress" protobuf:"bytes,1,rep,name=egress"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using
+// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address
+// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's
+// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy
+// is present) then the traffic will be allowed by default.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=egressnetworkpolicies,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/527
+// +openshift:file-pattern=operatorOrdering=004
+// +openshift:compatibility-gen:level=1
+type EgressNetworkPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is the specification of the current egress network policy
+ Spec EgressNetworkPolicySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressNetworkPolicyList is a collection of EgressNetworkPolicy
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EgressNetworkPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of policies
+ Items []EgressNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ab6eb72aae
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go
@@ -0,0 +1,347 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetwork) DeepCopyInto(out *ClusterNetwork) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.ClusterNetworks != nil {
+ in, out := &in.ClusterNetworks, &out.ClusterNetworks
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.VXLANPort != nil {
+ in, out := &in.VXLANPort, &out.VXLANPort
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.MTU != nil {
+ in, out := &in.MTU, &out.MTU
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetwork.
+func (in *ClusterNetwork) DeepCopy() *ClusterNetwork {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetwork)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterNetwork) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkList) DeepCopyInto(out *ClusterNetworkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterNetwork, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkList.
+func (in *ClusterNetworkList) DeepCopy() *ClusterNetworkList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterNetworkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressNetworkPolicy) DeepCopyInto(out *EgressNetworkPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicy.
+func (in *EgressNetworkPolicy) DeepCopy() *EgressNetworkPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressNetworkPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressNetworkPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressNetworkPolicyList) DeepCopyInto(out *EgressNetworkPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EgressNetworkPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyList.
+func (in *EgressNetworkPolicyList) DeepCopy() *EgressNetworkPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressNetworkPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressNetworkPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressNetworkPolicyPeer) DeepCopyInto(out *EgressNetworkPolicyPeer) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyPeer.
+func (in *EgressNetworkPolicyPeer) DeepCopy() *EgressNetworkPolicyPeer {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressNetworkPolicyPeer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressNetworkPolicyRule) DeepCopyInto(out *EgressNetworkPolicyRule) {
+ *out = *in
+ out.To = in.To
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyRule.
+func (in *EgressNetworkPolicyRule) DeepCopy() *EgressNetworkPolicyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressNetworkPolicyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressNetworkPolicySpec) DeepCopyInto(out *EgressNetworkPolicySpec) {
+ *out = *in
+ if in.Egress != nil {
+ in, out := &in.Egress, &out.Egress
+ *out = make([]EgressNetworkPolicyRule, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicySpec.
+func (in *EgressNetworkPolicySpec) DeepCopy() *EgressNetworkPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressNetworkPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostSubnet) DeepCopyInto(out *HostSubnet) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.EgressIPs != nil {
+ in, out := &in.EgressIPs, &out.EgressIPs
+ *out = make([]HostSubnetEgressIP, len(*in))
+ copy(*out, *in)
+ }
+ if in.EgressCIDRs != nil {
+ in, out := &in.EgressCIDRs, &out.EgressCIDRs
+ *out = make([]HostSubnetEgressCIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnet.
+func (in *HostSubnet) DeepCopy() *HostSubnet {
+ if in == nil {
+ return nil
+ }
+ out := new(HostSubnet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostSubnet) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostSubnetList) DeepCopyInto(out *HostSubnetList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HostSubnet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnetList.
+func (in *HostSubnetList) DeepCopy() *HostSubnetList {
+ if in == nil {
+ return nil
+ }
+ out := new(HostSubnetList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HostSubnetList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetNamespace) DeepCopyInto(out *NetNamespace) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.EgressIPs != nil {
+ in, out := &in.EgressIPs, &out.EgressIPs
+ *out = make([]NetNamespaceEgressIP, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespace.
+func (in *NetNamespace) DeepCopy() *NetNamespace {
+ if in == nil {
+ return nil
+ }
+ out := new(NetNamespace)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetNamespace) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetNamespaceList) DeepCopyInto(out *NetNamespaceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NetNamespace, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespaceList.
+func (in *NetNamespaceList) DeepCopy() *NetNamespaceList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetNamespaceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetNamespaceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..2f32210d28
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,126 @@
+clusternetworks.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/527
+ CRDName: clusternetworks.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "001"
+ FilenameRunLevel: ""
+ GroupName: network.openshift.io
+ HasStatus: false
+ KindName: ClusterNetwork
+ Labels: {}
+ PluralName: clusternetworks
+ PrinterColumns:
+ - description: The primary cluster network CIDR
+ jsonPath: .network
+ name: Cluster Network
+ type: string
+ - description: The service network CIDR
+ jsonPath: .serviceNetwork
+ name: Service Network
+ type: string
+ - description: The OpenShift SDN network plug-in in use
+ jsonPath: .pluginName
+ name: Plugin Name
+ type: string
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+egressnetworkpolicies.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/527
+ CRDName: egressnetworkpolicies.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "004"
+ FilenameRunLevel: ""
+ GroupName: network.openshift.io
+ HasStatus: false
+ KindName: EgressNetworkPolicy
+ Labels: {}
+ PluralName: egressnetworkpolicies
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+hostsubnets.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/527
+ CRDName: hostsubnets.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "002"
+ FilenameRunLevel: ""
+ GroupName: network.openshift.io
+ HasStatus: false
+ KindName: HostSubnet
+ Labels: {}
+ PluralName: hostsubnets
+ PrinterColumns:
+ - description: The name of the node
+ jsonPath: .host
+ name: Host
+ type: string
+ - description: The IP address to be used as a VTEP by other nodes in the overlay
+ network
+ jsonPath: .hostIP
+ name: Host IP
+ type: string
+ - description: The CIDR range of the overlay network assigned to the node for its
+ pods
+ jsonPath: .subnet
+ name: Subnet
+ type: string
+ - description: The network egress CIDRs
+ jsonPath: .egressCIDRs
+ name: Egress CIDRs
+ type: string
+ - description: The network egress IP addresses
+ jsonPath: .egressIPs
+ name: Egress IPs
+ type: string
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+netnamespaces.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/527
+ CRDName: netnamespaces.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "003"
+ FilenameRunLevel: ""
+ GroupName: network.openshift.io
+ HasStatus: false
+ KindName: NetNamespace
+ Labels: {}
+ PluralName: netnamespaces
+ PrinterColumns:
+ - description: The network identifier of the network namespace
+ jsonPath: .netid
+ name: NetID
+ type: integer
+ - description: The network egress IP addresses
+ jsonPath: .egressIPs
+ name: Egress IPs
+ type: string
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..f92172acaf
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,145 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ClusterNetwork = map[string]string{
+ "": "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "network": "Network is a CIDR string specifying the global overlay network's L3 space",
+ "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods",
+ "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from",
+ "pluginName": "PluginName is the name of the network plugin being used",
+ "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.",
+ "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.",
+ "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.",
+}
+
+func (ClusterNetwork) SwaggerDoc() map[string]string {
+ return map_ClusterNetwork
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.",
+ "CIDR": "CIDR defines the total range of a cluster networks address space.",
+ "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_ClusterNetworkList = map[string]string{
+ "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of cluster networks",
+}
+
+func (ClusterNetworkList) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkList
+}
+
+var map_EgressNetworkPolicy = map[string]string{
+ "": "EgressNetworkPolicy describes the current egress network policy for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the current egress network policy",
+}
+
+func (EgressNetworkPolicy) SwaggerDoc() map[string]string {
+ return map_EgressNetworkPolicy
+}
+
+var map_EgressNetworkPolicyList = map[string]string{
+ "": "EgressNetworkPolicyList is a collection of EgressNetworkPolicy\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is the list of policies",
+}
+
+func (EgressNetworkPolicyList) SwaggerDoc() map[string]string {
+ return map_EgressNetworkPolicyList
+}
+
+var map_EgressNetworkPolicyPeer = map[string]string{
+ "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to",
+ "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.",
+ "dnsName": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset",
+}
+
+func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string {
+ return map_EgressNetworkPolicyPeer
+}
+
+var map_EgressNetworkPolicyRule = map[string]string{
+ "": "EgressNetworkPolicyRule contains a single egress network policy rule",
+ "type": "type marks this as an \"Allow\" or \"Deny\" rule",
+ "to": "to is the target that traffic is allowed/denied to",
+}
+
+func (EgressNetworkPolicyRule) SwaggerDoc() map[string]string {
+ return map_EgressNetworkPolicyRule
+}
+
+var map_EgressNetworkPolicySpec = map[string]string{
+ "": "EgressNetworkPolicySpec provides a list of policies on outgoing network traffic",
+ "egress": "egress contains the list of egress policy rules",
+}
+
+func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string {
+ return map_EgressNetworkPolicySpec
+}
+
+var map_HostSubnet = map[string]string{
+ "": "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)",
+ "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network",
+ "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods",
+ "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.",
+ "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.",
+}
+
+func (HostSubnet) SwaggerDoc() map[string]string {
+ return map_HostSubnet
+}
+
+var map_HostSubnetList = map[string]string{
+ "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of host subnets",
+}
+
+func (HostSubnetList) SwaggerDoc() map[string]string {
+ return map_HostSubnetList
+}
+
+var map_NetNamespace = map[string]string{
+ "": "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)",
+ "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.",
+ "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)",
+}
+
+func (NetNamespace) SwaggerDoc() map[string]string {
+ return map_NetNamespace
+}
+
+var map_NetNamespaceList = map[string]string{
+ "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of net namespaces",
+}
+
+func (NetNamespaceList) SwaggerDoc() map[string]string {
+ return map_NetNamespaceList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/Makefile b/vendor/github.com/openshift/api/network/v1alpha1/Makefile
new file mode 100644
index 0000000000..376fee2dc0
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1alpha1"
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/doc.go b/vendor/github.com/openshift/api/network/v1alpha1/doc.go
new file mode 100644
index 0000000000..35539c458c
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/doc.go
@@ -0,0 +1,6 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=network.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/register.go b/vendor/github.com/openshift/api/network/v1alpha1/register.go
new file mode 100644
index 0000000000..6d80c234ba
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/register.go
@@ -0,0 +1,40 @@
+package v1alpha1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "network.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &DNSNameResolver{},
+ &DNSNameResolverList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go
new file mode 100644
index 0000000000..394f2e4ac0
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go
@@ -0,0 +1,142 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=dnsnameresolvers,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1524
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=dns,operatorOrdering=00
+// +openshift:compatibility-gen:level=4
+// +openshift:enable:FeatureGate=DNSNameResolver
+
+// DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set.
+// It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+type DNSNameResolver struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the DNSNameResolver.
+ // +kubebuilder:validation:Required
+ Spec DNSNameResolverSpec `json:"spec"`
+ // status is the most recently observed status of the DNSNameResolver.
+ // +optional
+ Status DNSNameResolverStatus `json:"status,omitempty"`
+}
+
+// DNSName is used for validation of a DNS name.
+// +kubebuilder:validation:Pattern=`^(\*\.)?([a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?\.){2,}$`
+// +kubebuilder:validation:MaxLength=254
+type DNSName string
+
+// DNSNameResolverSpec is a desired state description of DNSNameResolver.
+type DNSNameResolverSpec struct {
+ // name is the DNS name for which the DNS name resolution information will be stored.
+ // For a regular DNS name, only the DNS name resolution information of the regular DNS
+ // name will be stored. For a wildcard DNS name, the DNS name resolution information
+ // of all the DNS names that match the wildcard DNS name will be stored.
+ // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single
+ // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.'
+ // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.'
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable"
+ Name DNSName `json:"name"`
+}
+
+// DNSNameResolverStatus defines the observed status of DNSNameResolver.
+type DNSNameResolverStatus struct {
+ // resolvedNames contains a list of matching DNS names and their corresponding IP addresses
+ // along with their TTL and last DNS lookup times.
+ // +listType=map
+ // +listMapKey=dnsName
+ // +patchMergeKey=dnsName
+ // +patchStrategy=merge
+ // +optional
+ ResolvedNames []DNSNameResolverResolvedName `json:"resolvedNames,omitempty" patchStrategy:"merge" patchMergeKey:"dnsName"`
+}
+
+// DNSNameResolverResolvedName describes the details of a resolved DNS name.
+type DNSNameResolverResolvedName struct {
+ // conditions provide information about the state of the DNS name.
+ // Known .status.conditions.type is: "Degraded".
+ // "Degraded" is true when the last resolution failed for the DNS name,
+ // and false otherwise.
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can
+ // store both regular and wildcard DNS names which match the spec.name field. When the spec.name
+ // field contains a regular DNS name, this field will store the same regular DNS name after it is
+ // successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName
+ // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved.
+ // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard
+ // DNS name as well.
+ // +kubebuilder:validation:Required
+ DNSName DNSName `json:"dnsName"`
+
+ // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last
+ // lookup times for the dnsName.
+ // +kubebuilder:validation:Required
+ // +listType=map
+ // +listMapKey=ip
+ ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"`
+
+ // resolutionFailures keeps the count of how many consecutive times the DNS resolution failed
+ // for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon
+ // every failure, the value of the field will be incremented by one. The details about the DNS
+ // name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the
+ // associated IP addresses have expired.
+ ResolutionFailures int32 `json:"resolutionFailures,omitempty"`
+}
+
+// DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name.
+type DNSNameResolverResolvedAddress struct {
+ // ip is an IP address associated with the dnsName. The validity of the IP address expires after
+ // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon
+ // the expiration of the IP address's validity. If the information is not refreshed then it will
+ // be removed with a grace period after the expiration of the IP address's validity.
+ // +kubebuilder:validation:Required
+ IP string `json:"ip"`
+
+ // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after
+ // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with
+ // the current time-to-live value. If the information is not refreshed then it will be removed with a
+ // grace period after the expiration of the IP address's validity.
+ // +kubebuilder:validation:Required
+ TTLSeconds int32 `json:"ttlSeconds"`
+
+ // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of
+ // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to
+ // the current time on a successful DNS lookup. If the information is not refreshed then it will be
+ // removed with a grace period after the expiration of the IP address's validity.
+ // +kubebuilder:validation:Required
+ LastLookupTime *metav1.Time `json:"lastLookupTime"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +openshift:compatibility-gen:level=4
+
+// DNSNameResolverList contains a list of DNSNameResolvers.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+type DNSNameResolverList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // items gives the list of DNSNameResolvers.
+ Items []DNSNameResolver `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..b8308c3f83
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,161 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolver) DeepCopyInto(out *DNSNameResolver) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolver.
+func (in *DNSNameResolver) DeepCopy() *DNSNameResolver {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolver)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSNameResolver) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolverList) DeepCopyInto(out *DNSNameResolverList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNSNameResolver, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverList.
+func (in *DNSNameResolverList) DeepCopy() *DNSNameResolverList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolverList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSNameResolverList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolverResolvedAddress) DeepCopyInto(out *DNSNameResolverResolvedAddress) {
+ *out = *in
+ if in.LastLookupTime != nil {
+ in, out := &in.LastLookupTime, &out.LastLookupTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedAddress.
+func (in *DNSNameResolverResolvedAddress) DeepCopy() *DNSNameResolverResolvedAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolverResolvedAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolverResolvedName) DeepCopyInto(out *DNSNameResolverResolvedName) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ResolvedAddresses != nil {
+ in, out := &in.ResolvedAddresses, &out.ResolvedAddresses
+ *out = make([]DNSNameResolverResolvedAddress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverResolvedName.
+func (in *DNSNameResolverResolvedName) DeepCopy() *DNSNameResolverResolvedName {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolverResolvedName)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolverSpec) DeepCopyInto(out *DNSNameResolverSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverSpec.
+func (in *DNSNameResolverSpec) DeepCopy() *DNSNameResolverSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolverSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNameResolverStatus) DeepCopyInto(out *DNSNameResolverStatus) {
+ *out = *in
+ if in.ResolvedNames != nil {
+ in, out := &in.ResolvedNames, &out.ResolvedNames
+ *out = make([]DNSNameResolverResolvedName, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNameResolverStatus.
+func (in *DNSNameResolverStatus) DeepCopy() *DNSNameResolverStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNameResolverStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..0070eb584e
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,23 @@
+dnsnameresolvers.network.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1524
+ CRDName: dnsnameresolvers.network.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - DNSNameResolver
+ FilenameOperatorName: dns
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_70"
+ GroupName: network.openshift.io
+ HasStatus: true
+ KindName: DNSNameResolver
+ Labels: {}
+ PluralName: dnsnameresolvers
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates:
+ - DNSNameResolver
+ Version: v1alpha1
+
diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..e5018a9736
--- /dev/null
+++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,76 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DNSNameResolver = map[string]string{
+ "": "DNSNameResolver stores the DNS name resolution information of a DNS name. It can be enabled by the TechPreviewNoUpgrade feature set. It can also be enabled by the feature gate DNSNameResolver when using CustomNoUpgrade feature set.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the DNSNameResolver.",
+ "status": "status is the most recently observed status of the DNSNameResolver.",
+}
+
+func (DNSNameResolver) SwaggerDoc() map[string]string {
+ return map_DNSNameResolver
+}
+
+var map_DNSNameResolverList = map[string]string{
+ "": "DNSNameResolverList contains a list of DNSNameResolvers.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items gives the list of DNSNameResolvers.",
+}
+
+func (DNSNameResolverList) SwaggerDoc() map[string]string {
+ return map_DNSNameResolverList
+}
+
+var map_DNSNameResolverResolvedAddress = map[string]string{
+ "": "DNSNameResolverResolvedAddress describes the details of an IP address for a resolved DNS name.",
+ "ip": "ip is an IP address associated with the dnsName. The validity of the IP address expires after lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon the expiration of the IP address's validity. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.",
+ "ttlSeconds": "ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with the current time-to-live value. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.",
+ "lastLookupTime": "lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to the current time on a successful DNS lookup. If the information is not refreshed then it will be removed with a grace period after the expiration of the IP address's validity.",
+}
+
+func (DNSNameResolverResolvedAddress) SwaggerDoc() map[string]string {
+ return map_DNSNameResolverResolvedAddress
+}
+
+var map_DNSNameResolverResolvedName = map[string]string{
+ "": "DNSNameResolverResolvedName describes the details of a resolved DNS name.",
+ "conditions": "conditions provide information about the state of the DNS name. Known .status.conditions.type is: \"Degraded\". \"Degraded\" is true when the last resolution failed for the DNS name, and false otherwise.",
+ "dnsName": "dnsName is the resolved DNS name matching the name field of DNSNameResolverSpec. This field can store both regular and wildcard DNS names which match the spec.name field. When the spec.name field contains a regular DNS name, this field will store the same regular DNS name after it is successfully resolved. When the spec.name field contains a wildcard DNS name, each resolvedName.dnsName will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard DNS name as well.",
+ "resolvedAddresses": "resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last lookup times for the dnsName.",
+ "resolutionFailures": "resolutionFailures keeps the count of how many consecutive times the DNS resolution failed for the dnsName. If the DNS resolution succeeds then the field will be set to zero. Upon every failure, the value of the field will be incremented by one. The details about the DNS name will be removed, if the value of resolutionFailures reaches 5 and the TTL of all the associated IP addresses have expired.",
+}
+
+func (DNSNameResolverResolvedName) SwaggerDoc() map[string]string {
+ return map_DNSNameResolverResolvedName
+}
+
+var map_DNSNameResolverSpec = map[string]string{
+ "": "DNSNameResolverSpec is a desired state description of DNSNameResolver.",
+ "name": "name is the DNS name for which the DNS name resolution information will be stored. For a regular DNS name, only the DNS name resolution information of the regular DNS name will be stored. For a wildcard DNS name, the DNS name resolution information of all the DNS names that match the wildcard DNS name will be stored. For a wildcard DNS name, the '*' will match only one label. Additionally, only a single '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.'",
+}
+
+func (DNSNameResolverSpec) SwaggerDoc() map[string]string {
+ return map_DNSNameResolverSpec
+}
+
+var map_DNSNameResolverStatus = map[string]string{
+ "": "DNSNameResolverStatus defines the observed status of DNSNameResolver.",
+ "resolvedNames": "resolvedNames contains a list of matching DNS names and their corresponding IP addresses along with their TTL and last DNS lookup times.",
+}
+
+func (DNSNameResolverStatus) SwaggerDoc() map[string]string {
+ return map_DNSNameResolverStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/networkoperator/.codegen.yaml b/vendor/github.com/openshift/api/networkoperator/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/networkoperator/OWNERS b/vendor/github.com/openshift/api/networkoperator/OWNERS
new file mode 100644
index 0000000000..6148b9f77f
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+ - danwinship
+ - dcbw
+ - knobunc
+ - squeed
diff --git a/vendor/github.com/openshift/api/networkoperator/install.go b/vendor/github.com/openshift/api/networkoperator/install.go
new file mode 100644
index 0000000000..b06383bf42
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/install.go
@@ -0,0 +1,26 @@
+package networkoperator
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ networkoperatorv1 "github.com/openshift/api/networkoperator/v1"
+)
+
+const (
+ GroupName = "network.operator.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(networkoperatorv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/Makefile b/vendor/github.com/openshift/api/networkoperator/v1/Makefile
new file mode 100644
index 0000000000..96c9e16399
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.operator.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/doc.go b/vendor/github.com/openshift/api/networkoperator/v1/doc.go
new file mode 100644
index 0000000000..3c958bbc6e
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/doc.go
@@ -0,0 +1,5 @@
+// Package v1 contains API Schema definitions for the network v1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=network.operator.openshift.io
+// +kubebuilder:validation:Optional
+package v1
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go b/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go
new file mode 100644
index 0000000000..e275237047
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go
@@ -0,0 +1,2552 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/networkoperator/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *EgressRouter) Reset() { *m = EgressRouter{} }
+func (*EgressRouter) ProtoMessage() {}
+func (*EgressRouter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{0}
+}
+func (m *EgressRouter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouter.Merge(m, src)
+}
+func (m *EgressRouter) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouter) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouter proto.InternalMessageInfo
+
+func (m *EgressRouterAddress) Reset() { *m = EgressRouterAddress{} }
+func (*EgressRouterAddress) ProtoMessage() {}
+func (*EgressRouterAddress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{1}
+}
+func (m *EgressRouterAddress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterAddress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterAddress.Merge(m, src)
+}
+func (m *EgressRouterAddress) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterAddress) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterAddress proto.InternalMessageInfo
+
+func (m *EgressRouterInterface) Reset() { *m = EgressRouterInterface{} }
+func (*EgressRouterInterface) ProtoMessage() {}
+func (*EgressRouterInterface) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{2}
+}
+func (m *EgressRouterInterface) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterInterface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterInterface) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterInterface.Merge(m, src)
+}
+func (m *EgressRouterInterface) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterInterface) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterInterface.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterInterface proto.InternalMessageInfo
+
+func (m *EgressRouterList) Reset() { *m = EgressRouterList{} }
+func (*EgressRouterList) ProtoMessage() {}
+func (*EgressRouterList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{3}
+}
+func (m *EgressRouterList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterList.Merge(m, src)
+}
+func (m *EgressRouterList) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterList) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterList proto.InternalMessageInfo
+
+func (m *EgressRouterSpec) Reset() { *m = EgressRouterSpec{} }
+func (*EgressRouterSpec) ProtoMessage() {}
+func (*EgressRouterSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{4}
+}
+func (m *EgressRouterSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterSpec.Merge(m, src)
+}
+func (m *EgressRouterSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterSpec proto.InternalMessageInfo
+
+func (m *EgressRouterStatus) Reset() { *m = EgressRouterStatus{} }
+func (*EgressRouterStatus) ProtoMessage() {}
+func (*EgressRouterStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{5}
+}
+func (m *EgressRouterStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterStatus.Merge(m, src)
+}
+func (m *EgressRouterStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterStatus proto.InternalMessageInfo
+
+func (m *EgressRouterStatusCondition) Reset() { *m = EgressRouterStatusCondition{} }
+func (*EgressRouterStatusCondition) ProtoMessage() {}
+func (*EgressRouterStatusCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{6}
+}
+func (m *EgressRouterStatusCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EgressRouterStatusCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EgressRouterStatusCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EgressRouterStatusCondition.Merge(m, src)
+}
+func (m *EgressRouterStatusCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *EgressRouterStatusCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_EgressRouterStatusCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EgressRouterStatusCondition proto.InternalMessageInfo
+
+func (m *L4RedirectRule) Reset() { *m = L4RedirectRule{} }
+func (*L4RedirectRule) ProtoMessage() {}
+func (*L4RedirectRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{7}
+}
+func (m *L4RedirectRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *L4RedirectRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *L4RedirectRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_L4RedirectRule.Merge(m, src)
+}
+func (m *L4RedirectRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *L4RedirectRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_L4RedirectRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_L4RedirectRule proto.InternalMessageInfo
+
+func (m *MacvlanConfig) Reset() { *m = MacvlanConfig{} }
+func (*MacvlanConfig) ProtoMessage() {}
+func (*MacvlanConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{8}
+}
+func (m *MacvlanConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MacvlanConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MacvlanConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MacvlanConfig.Merge(m, src)
+}
+func (m *MacvlanConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *MacvlanConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_MacvlanConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MacvlanConfig proto.InternalMessageInfo
+
+func (m *RedirectConfig) Reset() { *m = RedirectConfig{} }
+func (*RedirectConfig) ProtoMessage() {}
+func (*RedirectConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4bddfca96304d190, []int{9}
+}
+func (m *RedirectConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RedirectConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RedirectConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RedirectConfig.Merge(m, src)
+}
+func (m *RedirectConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *RedirectConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_RedirectConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RedirectConfig proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*EgressRouter)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouter")
+ proto.RegisterType((*EgressRouterAddress)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterAddress")
+ proto.RegisterType((*EgressRouterInterface)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterInterface")
+ proto.RegisterType((*EgressRouterList)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterList")
+ proto.RegisterType((*EgressRouterSpec)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterSpec")
+ proto.RegisterType((*EgressRouterStatus)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterStatus")
+ proto.RegisterType((*EgressRouterStatusCondition)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterStatusCondition")
+ proto.RegisterType((*L4RedirectRule)(nil), "github.com.openshift.api.networkoperator.v1.L4RedirectRule")
+ proto.RegisterType((*MacvlanConfig)(nil), "github.com.openshift.api.networkoperator.v1.MacvlanConfig")
+ proto.RegisterType((*RedirectConfig)(nil), "github.com.openshift.api.networkoperator.v1.RedirectConfig")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/networkoperator/v1/generated.proto", fileDescriptor_4bddfca96304d190)
+}
+
+var fileDescriptor_4bddfca96304d190 = []byte{
+ // 960 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcf, 0x6f, 0x23, 0x35,
+ 0x14, 0xce, 0xa4, 0x49, 0x9b, 0xba, 0x4d, 0xb7, 0x18, 0xad, 0x14, 0x65, 0xa5, 0xa4, 0xcc, 0x61,
+ 0xb5, 0xb0, 0x62, 0x86, 0x56, 0x15, 0x62, 0x29, 0x08, 0x76, 0xca, 0xaf, 0x48, 0x0d, 0x04, 0xd3,
+ 0x13, 0x5a, 0x01, 0xee, 0x8c, 0x33, 0x19, 0x92, 0x19, 0x0f, 0xb6, 0x93, 0xa5, 0x12, 0x42, 0x1c,
+ 0x39, 0x22, 0xfe, 0x0f, 0xfe, 0x01, 0x0e, 0x9c, 0x7b, 0xec, 0x71, 0x4f, 0x11, 0x0d, 0x17, 0xfe,
+ 0x86, 0x9e, 0x90, 0x3d, 0x9e, 0x5f, 0xed, 0xb2, 0x6a, 0xba, 0xb7, 0xf8, 0xf9, 0x7d, 0xdf, 0xf7,
+ 0xfc, 0xfc, 0xf9, 0x4d, 0xc0, 0x81, 0x1f, 0x88, 0xd1, 0xf4, 0xc4, 0x72, 0x69, 0x68, 0xd3, 0x98,
+ 0x44, 0x7c, 0x14, 0x0c, 0x85, 0x8d, 0xe3, 0xc0, 0x8e, 0x88, 0x78, 0x4a, 0xd9, 0x98, 0xc6, 0x84,
+ 0x61, 0x41, 0x99, 0x3d, 0xdb, 0xb5, 0x7d, 0x12, 0xc9, 0x05, 0xf1, 0xac, 0x98, 0x51, 0x41, 0xe1,
+ 0xc3, 0x1c, 0x6c, 0x65, 0x60, 0x0b, 0xc7, 0x81, 0x75, 0x05, 0x6c, 0xcd, 0x76, 0xdb, 0x6f, 0x16,
+ 0x94, 0x7c, 0xea, 0x53, 0x5b, 0x71, 0x9c, 0x4c, 0x87, 0x6a, 0xa5, 0x16, 0xea, 0x57, 0xc2, 0xdd,
+ 0xde, 0x1f, 0xbf, 0xc3, 0xad, 0x80, 0xca, 0x52, 0x42, 0xec, 0x8e, 0x82, 0x88, 0xb0, 0x53, 0x3b,
+ 0x1e, 0xfb, 0x32, 0xc0, 0xed, 0x90, 0x08, 0xfc, 0x9c, 0x8a, 0xda, 0x6f, 0xff, 0x1f, 0x8a, 0x4d,
+ 0x23, 0x11, 0x84, 0xc4, 0xe6, 0xee, 0x88, 0x84, 0xf8, 0x2a, 0xce, 0xfc, 0xb3, 0x0a, 0x36, 0x3f,
+ 0xf6, 0x19, 0xe1, 0x1c, 0xd1, 0xa9, 0x20, 0x0c, 0x7e, 0x07, 0x1a, 0x52, 0xc3, 0xc3, 0x02, 0xb7,
+ 0x8c, 0x1d, 0xe3, 0xc1, 0xc6, 0xde, 0x5b, 0x56, 0xc2, 0x6d, 0x15, 0xb9, 0xad, 0x78, 0xec, 0xcb,
+ 0x00, 0xb7, 0x64, 0xb6, 0x35, 0xdb, 0xb5, 0xbe, 0x38, 0xf9, 0x9e, 0xb8, 0xa2, 0x4f, 0x04, 0x76,
+ 0xe0, 0xd9, 0xbc, 0x5b, 0x59, 0xcc, 0xbb, 0x20, 0x8f, 0xa1, 0x8c, 0x15, 0x7e, 0x0b, 0x6a, 0x3c,
+ 0x26, 0x6e, 0xab, 0xaa, 0xd8, 0xdf, 0xb7, 0x96, 0xe8, 0xa5, 0x55, 0x2c, 0xf5, 0xab, 0x98, 0xb8,
+ 0xce, 0xa6, 0x96, 0xaa, 0xc9, 0x15, 0x52, 0xc4, 0xd0, 0x07, 0xab, 0x5c, 0x60, 0x31, 0xe5, 0xad,
+ 0x15, 0x25, 0xf1, 0xc1, 0xed, 0x25, 0x14, 0x8d, 0xb3, 0xa5, 0x45, 0x56, 0x93, 0x35, 0xd2, 0xf4,
+ 0xe6, 0x13, 0xf0, 0x6a, 0x31, 0xfb, 0xb1, 0xe7, 0xc9, 0x05, 0x6c, 0x83, 0x6a, 0x10, 0xab, 0xe6,
+ 0xad, 0x3b, 0x40, 0x43, 0xab, 0xbd, 0x01, 0xaa, 0x06, 0x31, 0x7c, 0x1d, 0xac, 0xf9, 0x58, 0x90,
+ 0xa7, 0xf8, 0x54, 0x9d, 0x7f, 0xdd, 0xb9, 0xa3, 0x13, 0xd6, 0x3e, 0x4d, 0xc2, 0x28, 0xdd, 0x37,
+ 0x7f, 0x06, 0x77, 0x8b, 0xec, 0xbd, 0x48, 0x10, 0x36, 0xc4, 0x2e, 0x81, 0x04, 0xac, 0x85, 0xd8,
+ 0x9d, 0x4d, 0x70, 0xa4, 0x6f, 0xe8, 0xdd, 0xa5, 0x0e, 0xd8, 0x4f, 0xb0, 0x87, 0x34, 0x1a, 0x06,
+ 0x7e, 0xae, 0xaf, 0xc3, 0x28, 0xe5, 0x36, 0xcf, 0x0d, 0xb0, 0x5d, 0x2c, 0xe0, 0x28, 0xe0, 0x02,
+ 0x3e, 0xb9, 0x66, 0x0f, 0xeb, 0x66, 0xf6, 0x90, 0x68, 0x65, 0x8e, 0x6d, 0x2d, 0xd8, 0x48, 0x23,
+ 0x05, 0x6b, 0x7c, 0x03, 0xea, 0x81, 0x20, 0x21, 0x6f, 0x55, 0x77, 0x56, 0x1e, 0x6c, 0xec, 0x3d,
+ 0xba, 0xf5, 0xc5, 0x39, 0x4d, 0xad, 0x52, 0xef, 0x49, 0x3e, 0x94, 0xd0, 0x9a, 0x7f, 0xac, 0x94,
+ 0x8f, 0x24, 0x4d, 0x03, 0xf7, 0x41, 0x2d, 0xa4, 0x1e, 0xd1, 0x17, 0xb6, 0x93, 0x1a, 0xaa, 0x4f,
+ 0x3d, 0x72, 0x39, 0xef, 0x96, 0xf2, 0x65, 0x0c, 0xa9, 0x6c, 0x48, 0x40, 0x83, 0x11, 0x2f, 0x60,
+ 0xc4, 0x15, 0xda, 0xc9, 0x07, 0x4b, 0x55, 0x8b, 0x34, 0x58, 0x5f, 0xc3, 0xa6, 0xec, 0x48, 0x1a,
+ 0x43, 0x19, 0x35, 0xfc, 0xd5, 0x00, 0xdb, 0x1a, 0x9d, 0x19, 0x40, 0xdb, 0xda, 0xb9, 0x75, 0x77,
+ 0x32, 0x26, 0xa7, 0xa5, 0x4f, 0xbb, 0xfd, 0xf9, 0x15, 0x0d, 0x74, 0x4d, 0x15, 0xfe, 0x00, 0xd6,
+ 0x71, 0xe2, 0x70, 0xc2, 0x5b, 0x35, 0x75, 0x41, 0x1f, 0xde, 0xba, 0x04, 0xfd, 0x56, 0x9c, 0x57,
+ 0x74, 0x01, 0xeb, 0x8f, 0x53, 0x6a, 0x94, 0xab, 0x98, 0xbf, 0x1b, 0x00, 0x5e, 0x7f, 0x8f, 0xf0,
+ 0x27, 0x00, 0x5c, 0x1a, 0x79, 0x81, 0x08, 0x68, 0xc4, 0x5b, 0x86, 0x2a, 0xe5, 0xb3, 0x97, 0x7c,
+ 0xe4, 0x87, 0x29, 0x61, 0x3e, 0xbd, 0xb2, 0x10, 0x47, 0x05, 0x3d, 0xf3, 0xdf, 0x2a, 0xb8, 0xf7,
+ 0x02, 0x3c, 0x3c, 0x04, 0x35, 0x71, 0x1a, 0xa7, 0x7e, 0xb2, 0x53, 0x3f, 0x1d, 0x9f, 0xc6, 0xd2,
+ 0x4f, 0xdd, 0x17, 0x40, 0x65, 0x0a, 0x52, 0x60, 0xf8, 0x28, 0x9b, 0x61, 0xc9, 0x98, 0x78, 0xad,
+ 0x3c, 0x82, 0x2e, 0xe7, 0xdd, 0x3b, 0x19, 0xac, 0x3c, 0x95, 0xe0, 0x0c, 0xc0, 0x09, 0xe6, 0xe2,
+ 0x98, 0xe1, 0x88, 0x27, 0xb4, 0x41, 0x98, 0x7a, 0xe6, 0x8d, 0x9b, 0x3d, 0x56, 0x89, 0x70, 0xda,
+ 0x5a, 0x12, 0x1e, 0x5d, 0x63, 0x43, 0xcf, 0x51, 0x80, 0xf7, 0xc1, 0x2a, 0x23, 0x98, 0xd3, 0xa8,
+ 0x55, 0x53, 0x25, 0x67, 0x53, 0x13, 0xa9, 0x28, 0xd2, 0xbb, 0x72, 0x04, 0x86, 0x84, 0x73, 0xec,
+ 0x93, 0x56, 0xbd, 0x3c, 0x02, 0xfb, 0x49, 0x18, 0xa5, 0xfb, 0xe6, 0x85, 0x01, 0xb6, 0x8e, 0xf6,
+ 0xb3, 0x67, 0x31, 0x9d, 0x10, 0x78, 0x00, 0x9a, 0x1e, 0xe1, 0x22, 0x88, 0xb0, 0x14, 0xee, 0x0d,
+ 0x74, 0x9b, 0xef, 0x6a, 0x8e, 0xe6, 0x47, 0xc5, 0x4d, 0x54, 0xce, 0x85, 0x3b, 0xa0, 0x16, 0x53,
+ 0x96, 0x3c, 0xd8, 0x7a, 0xfe, 0xed, 0x18, 0x50, 0x26, 0x90, 0xda, 0x81, 0xef, 0x81, 0x86, 0xfa,
+ 0x30, 0xba, 0x74, 0xa2, 0x5a, 0x96, 0x0f, 0x84, 0xc6, 0x40, 0xc7, 0x2f, 0xe7, 0xdd, 0xcd, 0xf4,
+ 0xb7, 0xba, 0xb1, 0x0c, 0x01, 0xf7, 0x00, 0x10, 0x98, 0xf9, 0x44, 0x48, 0x46, 0xd5, 0x86, 0x7a,
+ 0x6e, 0xa7, 0xe3, 0x6c, 0x07, 0x15, 0xb2, 0xcc, 0x11, 0x68, 0x96, 0x26, 0x32, 0xb4, 0x4b, 0xf3,
+ 0xe8, 0xde, 0x95, 0x79, 0xb4, 0xa1, 0x93, 0x0b, 0xa3, 0xe8, 0x3e, 0x58, 0x0d, 0x31, 0x17, 0x84,
+ 0x69, 0xaf, 0x64, 0x8d, 0xef, 0xab, 0x28, 0xd2, 0xbb, 0xe6, 0x5f, 0x06, 0xd8, 0x2a, 0x8f, 0x1d,
+ 0xf8, 0x23, 0x68, 0xb2, 0x42, 0x77, 0xd3, 0xc7, 0xb4, 0xdc, 0x28, 0x2b, 0xdf, 0x50, 0x7e, 0x15,
+ 0xc5, 0x28, 0x47, 0x65, 0x21, 0xd9, 0xaa, 0x21, 0x9e, 0x4c, 0x4e, 0xb0, 0x3b, 0xee, 0x0d, 0x74,
+ 0xe1, 0x59, 0xab, 0x3e, 0xc9, 0x76, 0x50, 0x21, 0xcb, 0xf9, 0xf2, 0xec, 0xa2, 0x53, 0x39, 0xbf,
+ 0xe8, 0x54, 0x9e, 0x5d, 0x74, 0x2a, 0xbf, 0x2c, 0x3a, 0xc6, 0xd9, 0xa2, 0x63, 0x9c, 0x2f, 0x3a,
+ 0xc6, 0xb3, 0x45, 0xc7, 0xf8, 0x7b, 0xd1, 0x31, 0x7e, 0xfb, 0xa7, 0x53, 0xf9, 0xfa, 0xe1, 0x12,
+ 0xff, 0xec, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x47, 0x9e, 0x13, 0x07, 0x0a, 0x00, 0x00,
+}
+
+func (m *EgressRouter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouter) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterAddress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterAddress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Gateway)
+ copy(dAtA[i:], m.Gateway)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.IP)
+ copy(dAtA[i:], m.IP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterInterface) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterInterface) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterInterface) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Macvlan.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Addresses) > 0 {
+ for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ {
+ size, err := m.NetworkInterface.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Redirect != nil {
+ {
+ size, err := m.Redirect.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Mode)
+ copy(dAtA[i:], m.Mode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mode)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EgressRouterStatusCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EgressRouterStatusCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EgressRouterStatusCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *L4RedirectRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *L4RedirectRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *L4RedirectRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort))
+ i--
+ dAtA[i] = 0x20
+ i -= len(m.Protocol)
+ copy(dAtA[i:], m.Protocol)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+ i--
+ dAtA[i] = 0x1a
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.DestinationIP)
+ copy(dAtA[i:], m.DestinationIP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationIP)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MacvlanConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MacvlanConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MacvlanConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Master)
+ copy(dAtA[i:], m.Master)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Master)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Mode)
+ copy(dAtA[i:], m.Mode)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mode)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RedirectConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RedirectConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RedirectConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.FallbackIP)
+ copy(dAtA[i:], m.FallbackIP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FallbackIP)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.RedirectRules) > 0 {
+ for iNdEx := len(m.RedirectRules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.RedirectRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *EgressRouter) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressRouterAddress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Gateway)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressRouterInterface) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Macvlan.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EgressRouterList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EgressRouterSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Mode)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Redirect != nil {
+ l = m.Redirect.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = m.NetworkInterface.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Addresses) > 0 {
+ for _, e := range m.Addresses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EgressRouterStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EgressRouterStatusCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *L4RedirectRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.DestinationIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Port))
+ l = len(m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.TargetPort))
+ return n
+}
+
+func (m *MacvlanConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Mode)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Master)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RedirectConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.RedirectRules) > 0 {
+ for _, e := range m.RedirectRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.FallbackIP)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *EgressRouter) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressRouter{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressRouterSpec", "EgressRouterSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EgressRouterStatus", "EgressRouterStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterAddress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressRouterAddress{`,
+ `IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+ `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterInterface) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressRouterInterface{`,
+ `Macvlan:` + strings.Replace(strings.Replace(this.Macvlan.String(), "MacvlanConfig", "MacvlanConfig", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]EgressRouter{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EgressRouter", "EgressRouter", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&EgressRouterList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForAddresses := "[]EgressRouterAddress{"
+ for _, f := range this.Addresses {
+ repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EgressRouterAddress", "EgressRouterAddress", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAddresses += "}"
+ s := strings.Join([]string{`&EgressRouterSpec{`,
+ `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
+ `Redirect:` + strings.Replace(this.Redirect.String(), "RedirectConfig", "RedirectConfig", 1) + `,`,
+ `NetworkInterface:` + strings.Replace(strings.Replace(this.NetworkInterface.String(), "EgressRouterInterface", "EgressRouterInterface", 1), `&`, ``, 1) + `,`,
+ `Addresses:` + repeatedStringForAddresses + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]EgressRouterStatusCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "EgressRouterStatusCondition", "EgressRouterStatusCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&EgressRouterStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EgressRouterStatusCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EgressRouterStatusCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *L4RedirectRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&L4RedirectRule{`,
+ `DestinationIP:` + fmt.Sprintf("%v", this.DestinationIP) + `,`,
+ `Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+ `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+ `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MacvlanConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MacvlanConfig{`,
+ `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
+ `Master:` + fmt.Sprintf("%v", this.Master) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RedirectConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRedirectRules := "[]L4RedirectRule{"
+ for _, f := range this.RedirectRules {
+ repeatedStringForRedirectRules += strings.Replace(strings.Replace(f.String(), "L4RedirectRule", "L4RedirectRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRedirectRules += "}"
+ s := strings.Join([]string{`&RedirectConfig{`,
+ `RedirectRules:` + repeatedStringForRedirectRules + `,`,
+ `FallbackIP:` + fmt.Sprintf("%v", this.FallbackIP) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *EgressRouter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouter: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouter: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterAddress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterAddress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterAddress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Gateway = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterInterface) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterInterface: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterInterface: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Macvlan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Macvlan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, EgressRouter{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Mode = EgressRouterMode(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Redirect", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Redirect == nil {
+ m.Redirect = &RedirectConfig{}
+ }
+ if err := m.Redirect.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NetworkInterface", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.NetworkInterface.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, EgressRouterAddress{})
+ if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, EgressRouterStatusCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EgressRouterStatusCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EgressRouterStatusCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EgressRouterStatusCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = EgressRouterStatusConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *L4RedirectRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: L4RedirectRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: L4RedirectRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationIP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Port |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = ProtocolType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
+ }
+ m.TargetPort = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TargetPort |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MacvlanConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MacvlanConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MacvlanConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Mode = MacvlanMode(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Master = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RedirectConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RedirectConfig: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RedirectConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RedirectRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RedirectRules = append(m.RedirectRules, L4RedirectRule{})
+ if err := m.RedirectRules[len(m.RedirectRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FallbackIP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FallbackIP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto
new file mode 100644
index 0000000000..2f813e2a5e
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto
@@ -0,0 +1,194 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.networkoperator.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/networkoperator/v1";
+
+// EgressRouter is a feature allowing the user to define an egress router
+// that acts as a bridge between pods and external systems. The egress router runs
+// a service that redirects egress traffic originating from a pod or a group of
+// pods to a remote external system or multiple destinations as per configuration.
+//
+// It is consumed by the cluster-network-operator.
+// More specifically, given an EgressRouter CR with a given name, the CNO will create and manage:
+// - A service with that name
+// - An egress pod with that name
+// - A NAD (NetworkAttachmentDefinition) with that name
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// EgressRouter is a single egressrouter pod configuration object.
+// +k8s:openapi-gen=true
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=egressrouters,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/851
+// +openshift:file-pattern=operatorOrdering=001
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=true
+// +kubebuilder:printcolumn:name="Condition",type=string,JSONPath=".status.conditions[*].type"
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.conditions[*].status"
+message EgressRouter {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired egress router.
+ // +kubebuilder:validation:Required
+ optional EgressRouterSpec spec = 2;
+
+ // Observed status of EgressRouter.
+ optional EgressRouterStatus status = 3;
+}
+
+// EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface
+// +kubebuilder:validation:Required
+message EgressRouterAddress {
+ // IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+ // +kubebuilder:validation:Required
+ optional string ip = 1;
+
+ // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+ optional string gateway = 2;
+}
+
+// EgressRouterInterface contains the configuration of interface to create/use.
+message EgressRouterInterface {
+ // Arguments specific to the interfaceType macvlan
+ // +kubebuilder:default:={mode: Bridge}
+ optional MacvlanConfig macvlan = 1;
+}
+
+// EgressRouterList is the list of egress router pods requested.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message EgressRouterList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated EgressRouter items = 2;
+}
+
+// EgressRouterSpec contains the configuration for an egress router.
+// Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode.
+// Each config consists of parameters specific to that mode.
+// +k8s:openapi-gen=true
+// +kubebuilder:validation:Required
+message EgressRouterSpec {
+ // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Redirect"
+ // +kubebuilder:default:="Redirect"
+ optional string mode = 1;
+
+ // Redirect represents the configuration parameters specific to redirect mode.
+ optional RedirectConfig redirect = 2;
+
+ // Specification of interface to create/use. The default is macvlan.
+ // Currently only macvlan is supported.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default:={macvlan: {mode: Bridge}}
+ optional EgressRouterInterface networkInterface = 3;
+
+ // List of IP addresses to configure on the pod's secondary interface.
+ // +kubebuilder:validation:Required
+ repeated EgressRouterAddress addresses = 4;
+}
+
+// EgressRouterStatus contains the observed status of EgressRouter. Read-only.
+message EgressRouterStatus {
+ // Observed status of the egress router
+ // +kubebuilder:validation:Required
+ repeated EgressRouterStatusCondition conditions = 1;
+}
+
+// EgressRouterStatusCondition represents the state of the egress router's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+message EgressRouterStatusCondition {
+ // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded"
+ // +required
+ optional string type = 1;
+
+ // Status of the condition, one of True, False, Unknown.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="True";"False";"Unknown"
+ // +required
+ optional string status = 2;
+
+ // LastTransitionTime is the time of the last update to the current status property.
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Reason is the CamelCase reason for the condition's current status.
+ optional string reason = 4;
+
+ // Message provides additional information about the current condition.
+ // This is only to be consumed by humans. It may contain Line Feed
+ // characters (U+000A), which should be rendered as new lines.
+ optional string message = 5;
+}
+
+// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.
+message L4RedirectRule {
+ // IP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+ // +kubebuilder:validation:Required
+ optional string destinationIP = 1;
+
+ // Port is the port number to which clients should send traffic to be redirected.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Maximum:=65535
+ // +kubebuilder:validation:Minimum:=1
+ optional int32 port = 2;
+
+ // Protocol can be TCP, SCTP or UDP.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP"
+ optional string protocol = 3;
+
+ // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to.
+ // If unspecified, the value from "Port" is used.
+ // +kubebuilder:validation:Maximum:=65535
+ // +kubebuilder:validation:Minimum:=1
+ optional int32 targetPort = 4;
+}
+
+// MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType
+message MacvlanConfig {
+ // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge".
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru"
+ // +kubebuilder:default:="Bridge"
+ optional string mode = 1;
+
+ // Name of the master interface. Need not be specified if it can be inferred from the IP address.
+ optional string master = 2;
+}
+
+// RedirectConfig represents the configuration parameters specific to redirect mode.
+message RedirectConfig {
+ // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.
+ repeated L4RedirectRule redirectRules = 1;
+
+ // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+ // If no redirect rules are specified, all traffic from the router are redirected to this IP.
+ // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP.
+ // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.
+ optional string fallbackIP = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/register.go b/vendor/github.com/openshift/api/networkoperator/v1/register.go
new file mode 100644
index 0000000000..2fcb8dc0ff
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/register.go
@@ -0,0 +1,25 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "network.operator.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &EgressRouter{},
+ &EgressRouterList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go
new file mode 100644
index 0000000000..9f11590e05
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go
@@ -0,0 +1,270 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EgressRouter is a feature allowing the user to define an egress router
+// that acts as a bridge between pods and external systems. The egress router runs
+// a service that redirects egress traffic originating from a pod or a group of
+// pods to a remote external system or multiple destinations as per configuration.
+//
+// It is consumed by the cluster-network-operator.
+// More specifically, given an EgressRouter CR with a given name, the CNO will create and manage:
+// - A service with that name
+// - An egress pod with that name
+// - A NAD (NetworkAttachmentDefinition) with that name
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// EgressRouter is a single egressrouter pod configuration object.
+// +k8s:openapi-gen=true
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=egressrouters,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/851
+// +openshift:file-pattern=operatorOrdering=001
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=true
+// +kubebuilder:printcolumn:name="Condition",type=string,JSONPath=".status.conditions[*].type"
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.conditions[*].status"
+type EgressRouter struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Specification of the desired egress router.
+ // +kubebuilder:validation:Required
+ Spec EgressRouterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Observed status of EgressRouter.
+ Status EgressRouterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// EgressRouterSpec contains the configuration for an egress router.
+// Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode.
+// Each config consists of parameters specific to that mode.
+// +k8s:openapi-gen=true
+// +kubebuilder:validation:Required
+type EgressRouterSpec struct {
+ // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Redirect"
+ // +kubebuilder:default:="Redirect"
+ Mode EgressRouterMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=EgressRouterMode"`
+
+ // Redirect represents the configuration parameters specific to redirect mode.
+ Redirect *RedirectConfig `json:"redirect,omitempty" protobuf:"bytes,2,opt,name=redirect"`
+
+ // Specification of interface to create/use. The default is macvlan.
+ // Currently only macvlan is supported.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:default:={macvlan: {mode: Bridge}}
+ NetworkInterface EgressRouterInterface `json:"networkInterface" protobuf:"bytes,3,opt,name=networkInterface"`
+
+ // List of IP addresses to configure on the pod's secondary interface.
+ // +kubebuilder:validation:Required
+ Addresses []EgressRouterAddress `json:"addresses" protobuf:"bytes,4,rep,name=addresses"`
+}
+
+// EgressRouterMode defines the different types of modes that are supported for the egress router interface.
+// The default mode is "Redirect" and is the only supported mode currently.
+type EgressRouterMode string
+
+const (
+ // EgressRouterModeRedirect creates an egress router that sets up iptables rules to redirect traffic
+ // from its own IP address to one or more remote destination IP addresses.
+ EgressRouterModeRedirect EgressRouterMode = "Redirect"
+)
+
+// RedirectConfig represents the configuration parameters specific to redirect mode.
+type RedirectConfig struct {
+ // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.
+ RedirectRules []L4RedirectRule `json:"redirectRules,omitempty" protobuf:"bytes,1,rep,name=redirectRules"`
+
+ // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+ // If no redirect rules are specified, all traffic from the router are redirected to this IP.
+ // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP.
+ // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.
+ FallbackIP string `json:"fallbackIP,omitempty" protobuf:"bytes,2,opt,name=fallbackIP"`
+}
+
+// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.
+type L4RedirectRule struct {
+ // IP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+ // +kubebuilder:validation:Required
+ DestinationIP string `json:"destinationIP" protobuf:"bytes,1,opt,name=destinationIP"`
+
+ // Port is the port number to which clients should send traffic to be redirected.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Maximum:=65535
+ // +kubebuilder:validation:Minimum:=1
+ Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
+
+ // Protocol can be TCP, SCTP or UDP.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP"
+ Protocol ProtocolType `json:"protocol" protobuf:"bytes,3,opt,name=protocol,casttype=ProtocolType"`
+
+ // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to.
+ // If unspecified, the value from "Port" is used.
+ // +kubebuilder:validation:Maximum:=65535
+ // +kubebuilder:validation:Minimum:=1
+ TargetPort int32 `json:"targetPort,omitempty" protobuf:"varint,4,opt,name=targetPort"`
+}
+
+// ProtocolType defines the protocol types that are supported
+type ProtocolType string
+
+const (
+ // ProtocolTypeTCP refers to the TCP protocol
+ ProtocolTypeTCP ProtocolType = "TCP"
+
+ // ProtocolTypeUDP refers to the UDP protocol
+ ProtocolTypeUDP ProtocolType = "UDP"
+
+ // ProtocolTypeSCTP refers to the SCTP protocol
+ ProtocolTypeSCTP ProtocolType = "SCTP"
+)
+
+// EgressRouterInterface contains the configuration of interface to create/use.
+type EgressRouterInterface struct {
+ // Arguments specific to the interfaceType macvlan
+ // +kubebuilder:default:={mode: Bridge}
+ Macvlan MacvlanConfig `json:"macvlan" protobuf:"bytes,1,opt,name=macvlan"`
+}
+
+// MacvlanMode defines the different types of modes that are supported for the macvlan interface.
+// source: https://man7.org/linux/man-pages/man8/ip-link.8.html
+type MacvlanMode string
+
+const (
+ // MacvlanModeBridge connects all endpoints directly to each other, communication is not redirected through the physical interface's peer.
+ MacvlanModeBridge MacvlanMode = "Bridge"
+
+ // MacvlanModePrivate does not allow communication between macvlan instances on the same physical interface,
+ // even if the external switch supports hairpin mode.
+ MacvlanModePrivate MacvlanMode = "Private"
+
+ // MacvlanModeVEPA is the Virtual Ethernet Port Aggregator mode. Data from one macvlan instance to the other on the
+ // same physical interface is transmitted over the physical interface. Either the attached switch needs
+ // to support hairpin mode, or there must be a TCP/IP router forwarding the packets in order to allow
+ // communication. This is the default mode.
+ MacvlanModeVEPA MacvlanMode = "VEPA"
+
+ // MacvlanModePassthru mode gives more power to a single endpoint, usually in macvtap mode.
+ // It is not allowed for more than one endpoint on the same physical interface. All traffic will be forwarded
+ // to this endpoint, allowing virtio guests to change MAC address or set promiscuous mode in order to bridge the
+ // interface or create vlan interfaces on top of it.
+ MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType
+type MacvlanConfig struct {
+ // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge".
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru"
+ // +kubebuilder:default:="Bridge"
+ Mode MacvlanMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=MacvlanMode"`
+
+ // Name of the master interface. Need not be specified if it can be inferred from the IP address.
+ Master string `json:"master,omitempty" protobuf:"bytes,2,opt,name=master"`
+}
+
+// EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface
+// +kubebuilder:validation:Required
+type EgressRouterAddress struct {
+ // IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+ // +kubebuilder:validation:Required
+ IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+ // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+ Gateway string `json:"gateway,omitempty" protobuf:"bytes,2,opt,name=gateway"`
+}
+
+// EgressRouterStatusConditionType is an aspect of the router's state.
+type EgressRouterStatusConditionType string
+
+const (
+ // EgressRouterAvailable indicates that the EgressRouter (the associated pod, service, NAD), is functional and available in the cluster.
+ EgressRouterAvailable EgressRouterStatusConditionType = "Available"
+
+ // EgressRouterProgressing indicates that the router is actively rolling out new code,
+ // propagating config changes, or otherwise moving from one steady state to
+ // another.
+ EgressRouterProgressing EgressRouterStatusConditionType = "Progressing"
+
+ // EgressRouterDegraded indicates that the router's current state does not match its
+ // desired state over a period of time resulting in a lower quality of service.
+ EgressRouterDegraded EgressRouterStatusConditionType = "Degraded"
+)
+
+// ConditionStatus defines the status of each of EgressRouterStatusConditionType.
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// EgressRouterStatusCondition represents the state of the egress router's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+type EgressRouterStatusCondition struct {
+ // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded"
+ // +required
+ Type EgressRouterStatusConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=EgressRouterStatusConditionType"`
+
+ // Status of the condition, one of True, False, Unknown.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Enum="True";"False";"Unknown"
+ // +required
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+
+ // LastTransitionTime is the time of the last update to the current status property.
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+
+ // Reason is the CamelCase reason for the condition's current status.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+
+ // Message provides additional information about the current condition.
+ // This is only to be consumed by humans. It may contain Line Feed
+ // characters (U+000A), which should be rendered as new lines.
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// EgressRouterStatus contains the observed status of EgressRouter. Read-only.
+type EgressRouterStatus struct {
+ // Observed status of the egress router
+ // +kubebuilder:validation:Required
+ Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressRouterList is the list of egress router pods requested.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EgressRouterList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ Items []EgressRouter `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..e58d3dfaa0
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go
@@ -0,0 +1,224 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouter) DeepCopyInto(out *EgressRouter) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouter.
+func (in *EgressRouter) DeepCopy() *EgressRouter {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressRouter) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterAddress) DeepCopyInto(out *EgressRouterAddress) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterAddress.
+func (in *EgressRouterAddress) DeepCopy() *EgressRouterAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterInterface) DeepCopyInto(out *EgressRouterInterface) {
+ *out = *in
+ out.Macvlan = in.Macvlan
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterInterface.
+func (in *EgressRouterInterface) DeepCopy() *EgressRouterInterface {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterInterface)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterList) DeepCopyInto(out *EgressRouterList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EgressRouter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterList.
+func (in *EgressRouterList) DeepCopy() *EgressRouterList {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressRouterList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterSpec) DeepCopyInto(out *EgressRouterSpec) {
+ *out = *in
+ if in.Redirect != nil {
+ in, out := &in.Redirect, &out.Redirect
+ *out = new(RedirectConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ out.NetworkInterface = in.NetworkInterface
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]EgressRouterAddress, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterSpec.
+func (in *EgressRouterSpec) DeepCopy() *EgressRouterSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterStatus) DeepCopyInto(out *EgressRouterStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]EgressRouterStatusCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterStatus.
+func (in *EgressRouterStatus) DeepCopy() *EgressRouterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterStatusCondition) DeepCopyInto(out *EgressRouterStatusCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterStatusCondition.
+func (in *EgressRouterStatusCondition) DeepCopy() *EgressRouterStatusCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRouterStatusCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *L4RedirectRule) DeepCopyInto(out *L4RedirectRule) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4RedirectRule.
+func (in *L4RedirectRule) DeepCopy() *L4RedirectRule {
+ if in == nil {
+ return nil
+ }
+ out := new(L4RedirectRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MacvlanConfig) DeepCopyInto(out *MacvlanConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MacvlanConfig.
+func (in *MacvlanConfig) DeepCopy() *MacvlanConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MacvlanConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectConfig) DeepCopyInto(out *RedirectConfig) {
+ *out = *in
+ if in.RedirectRules != nil {
+ in, out := &in.RedirectRules, &out.RedirectRules
+ *out = make([]L4RedirectRule, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectConfig.
+func (in *RedirectConfig) DeepCopy() *RedirectConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RedirectConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..8af113091e
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,29 @@
+egressrouters.network.operator.openshift.io:
+ Annotations:
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/851
+ CRDName: egressrouters.network.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "001"
+ FilenameRunLevel: ""
+ GroupName: network.operator.openshift.io
+ HasStatus: true
+ KindName: EgressRouter
+ Labels: {}
+ PluralName: egressrouters
+ PrinterColumns:
+ - jsonPath: .status.conditions[*].type
+ name: Condition
+ type: string
+ - jsonPath: .status.conditions[*].status
+ name: Status
+ type: string
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..97bec9e29d
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,119 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_EgressRouter = map[string]string{
+ "": "EgressRouter is a feature allowing the user to define an egress router that acts as a bridge between pods and external systems. The egress router runs a service that redirects egress traffic originating from a pod or a group of pods to a remote external system or multiple destinations as per configuration.\n\nIt is consumed by the cluster-network-operator. More specifically, given an EgressRouter CR with , the CNO will create and manage: - A service called - An egress pod called - A NAD called \n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).\n\nEgressRouter is a single egressrouter pod configuration object.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Specification of the desired egress router.",
+ "status": "Observed status of EgressRouter.",
+}
+
+func (EgressRouter) SwaggerDoc() map[string]string {
+ return map_EgressRouter
+}
+
+var map_EgressRouterAddress = map[string]string{
+ "": "EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface",
+ "ip": "IP is the address to configure on the router's interface. Can be IPv4 or IPv6.",
+ "gateway": "IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.",
+}
+
+func (EgressRouterAddress) SwaggerDoc() map[string]string {
+ return map_EgressRouterAddress
+}
+
+var map_EgressRouterInterface = map[string]string{
+ "": "EgressRouterInterface contains the configuration of interface to create/use.",
+ "macvlan": "Arguments specific to the interfaceType macvlan",
+}
+
+func (EgressRouterInterface) SwaggerDoc() map[string]string {
+ return map_EgressRouterInterface
+}
+
+var map_EgressRouterList = map[string]string{
+ "": "EgressRouterList is the list of egress router pods requested.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (EgressRouterList) SwaggerDoc() map[string]string {
+ return map_EgressRouterList
+}
+
+var map_EgressRouterSpec = map[string]string{
+ "": "EgressRouterSpec contains the configuration for an egress router. Mode, networkInterface and addresses fields must be specified along with exactly one \"Config\" that matches the mode. Each config consists of parameters specific to that mode.",
+ "mode": "Mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.",
+ "redirect": "Redirect represents the configuration parameters specific to redirect mode.",
+ "networkInterface": "Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported.",
+ "addresses": "List of IP addresses to configure on the pod's secondary interface.",
+}
+
+func (EgressRouterSpec) SwaggerDoc() map[string]string {
+ return map_EgressRouterSpec
+}
+
+var map_EgressRouterStatus = map[string]string{
+ "": "EgressRouterStatus contains the observed status of EgressRouter. Read-only.",
+ "conditions": "Observed status of the egress router",
+}
+
+func (EgressRouterStatus) SwaggerDoc() map[string]string {
+ return map_EgressRouterStatus
+}
+
+var map_EgressRouterStatusCondition = map[string]string{
+ "": "EgressRouterStatusCondition represents the state of the egress router's managed and monitored components.",
+ "type": "Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastTransitionTime": "LastTransitionTime is the time of the last update to the current status property.",
+ "reason": "Reason is the CamelCase reason for the condition's current status.",
+ "message": "Message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.",
+}
+
+func (EgressRouterStatusCondition) SwaggerDoc() map[string]string {
+ return map_EgressRouterStatusCondition
+}
+
+var map_L4RedirectRule = map[string]string{
+ "": "L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.",
+ "destinationIP": "IP specifies the remote destination's IP address. Can be IPv4 or IPv6.",
+ "port": "Port is the port number to which clients should send traffic to be redirected.",
+ "protocol": "Protocol can be TCP, SCTP or UDP.",
+ "targetPort": "TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from \"Port\" is used.",
+}
+
+func (L4RedirectRule) SwaggerDoc() map[string]string {
+ return map_L4RedirectRule
+}
+
+var map_MacvlanConfig = map[string]string{
+ "": "MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType",
+ "mode": "Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".",
+ "master": "Name of the master interface. Need not be specified if it can be inferred from the IP address.",
+}
+
+func (MacvlanConfig) SwaggerDoc() map[string]string {
+ return map_MacvlanConfig
+}
+
+var map_RedirectConfig = map[string]string{
+ "": "RedirectConfig represents the configuration parameters specific to redirect mode.",
+ "redirectRules": "List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.",
+ "fallbackIP": "FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.",
+}
+
+func (RedirectConfig) SwaggerDoc() map[string]string {
+ return map_RedirectConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/oauth/.codegen.yaml b/vendor/github.com/openshift/api/oauth/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/oauth/install.go b/vendor/github.com/openshift/api/oauth/install.go
new file mode 100644
index 0000000000..6bf63539d9
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/install.go
@@ -0,0 +1,26 @@
+package oauth
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ oauthv1 "github.com/openshift/api/oauth/v1"
+)
+
+const (
+ GroupName = "oauth.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(oauthv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/doc.go b/vendor/github.com/openshift/api/oauth/v1/doc.go
new file mode 100644
index 0000000000..cae9e70d4a
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/oauth/apis/oauth
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=oauth.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go
new file mode 100644
index 0000000000..a79c468020
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go
@@ -0,0 +1,4624 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/oauth/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ClusterRoleScopeRestriction) Reset() { *m = ClusterRoleScopeRestriction{} }
+func (*ClusterRoleScopeRestriction) ProtoMessage() {}
+func (*ClusterRoleScopeRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{0}
+}
+func (m *ClusterRoleScopeRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterRoleScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterRoleScopeRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterRoleScopeRestriction.Merge(m, src)
+}
+func (m *ClusterRoleScopeRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterRoleScopeRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterRoleScopeRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterRoleScopeRestriction proto.InternalMessageInfo
+
+func (m *OAuthAccessToken) Reset() { *m = OAuthAccessToken{} }
+func (*OAuthAccessToken) ProtoMessage() {}
+func (*OAuthAccessToken) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{1}
+}
+func (m *OAuthAccessToken) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthAccessToken) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthAccessToken.Merge(m, src)
+}
+func (m *OAuthAccessToken) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthAccessToken) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthAccessToken.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthAccessToken proto.InternalMessageInfo
+
+func (m *OAuthAccessTokenList) Reset() { *m = OAuthAccessTokenList{} }
+func (*OAuthAccessTokenList) ProtoMessage() {}
+func (*OAuthAccessTokenList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{2}
+}
+func (m *OAuthAccessTokenList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthAccessTokenList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthAccessTokenList.Merge(m, src)
+}
+func (m *OAuthAccessTokenList) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthAccessTokenList) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthAccessTokenList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthAccessTokenList proto.InternalMessageInfo
+
+func (m *OAuthAuthorizeToken) Reset() { *m = OAuthAuthorizeToken{} }
+func (*OAuthAuthorizeToken) ProtoMessage() {}
+func (*OAuthAuthorizeToken) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{3}
+}
+func (m *OAuthAuthorizeToken) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthAuthorizeToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthAuthorizeToken) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthAuthorizeToken.Merge(m, src)
+}
+func (m *OAuthAuthorizeToken) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthAuthorizeToken) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthAuthorizeToken.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthAuthorizeToken proto.InternalMessageInfo
+
+func (m *OAuthAuthorizeTokenList) Reset() { *m = OAuthAuthorizeTokenList{} }
+func (*OAuthAuthorizeTokenList) ProtoMessage() {}
+func (*OAuthAuthorizeTokenList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{4}
+}
+func (m *OAuthAuthorizeTokenList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthAuthorizeTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthAuthorizeTokenList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthAuthorizeTokenList.Merge(m, src)
+}
+func (m *OAuthAuthorizeTokenList) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthAuthorizeTokenList) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthAuthorizeTokenList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthAuthorizeTokenList proto.InternalMessageInfo
+
+func (m *OAuthClient) Reset() { *m = OAuthClient{} }
+func (*OAuthClient) ProtoMessage() {}
+func (*OAuthClient) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{5}
+}
+func (m *OAuthClient) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthClient) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthClient.Merge(m, src)
+}
+func (m *OAuthClient) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthClient) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthClient.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthClient proto.InternalMessageInfo
+
+func (m *OAuthClientAuthorization) Reset() { *m = OAuthClientAuthorization{} }
+func (*OAuthClientAuthorization) ProtoMessage() {}
+func (*OAuthClientAuthorization) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{6}
+}
+func (m *OAuthClientAuthorization) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthClientAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthClientAuthorization) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthClientAuthorization.Merge(m, src)
+}
+func (m *OAuthClientAuthorization) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthClientAuthorization) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthClientAuthorization.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthClientAuthorization proto.InternalMessageInfo
+
+func (m *OAuthClientAuthorizationList) Reset() { *m = OAuthClientAuthorizationList{} }
+func (*OAuthClientAuthorizationList) ProtoMessage() {}
+func (*OAuthClientAuthorizationList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{7}
+}
+func (m *OAuthClientAuthorizationList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthClientAuthorizationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthClientAuthorizationList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthClientAuthorizationList.Merge(m, src)
+}
+func (m *OAuthClientAuthorizationList) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthClientAuthorizationList) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthClientAuthorizationList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthClientAuthorizationList proto.InternalMessageInfo
+
+func (m *OAuthClientList) Reset() { *m = OAuthClientList{} }
+func (*OAuthClientList) ProtoMessage() {}
+func (*OAuthClientList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{8}
+}
+func (m *OAuthClientList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthClientList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthClientList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthClientList.Merge(m, src)
+}
+func (m *OAuthClientList) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthClientList) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthClientList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthClientList proto.InternalMessageInfo
+
+func (m *OAuthRedirectReference) Reset() { *m = OAuthRedirectReference{} }
+func (*OAuthRedirectReference) ProtoMessage() {}
+func (*OAuthRedirectReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{9}
+}
+func (m *OAuthRedirectReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OAuthRedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OAuthRedirectReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OAuthRedirectReference.Merge(m, src)
+}
+func (m *OAuthRedirectReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *OAuthRedirectReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_OAuthRedirectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OAuthRedirectReference proto.InternalMessageInfo
+
+func (m *RedirectReference) Reset() { *m = RedirectReference{} }
+func (*RedirectReference) ProtoMessage() {}
+func (*RedirectReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{10}
+}
+func (m *RedirectReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *RedirectReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RedirectReference.Merge(m, src)
+}
+func (m *RedirectReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *RedirectReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_RedirectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RedirectReference proto.InternalMessageInfo
+
+func (m *ScopeRestriction) Reset() { *m = ScopeRestriction{} }
+func (*ScopeRestriction) ProtoMessage() {}
+func (*ScopeRestriction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{11}
+}
+func (m *ScopeRestriction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ScopeRestriction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ScopeRestriction.Merge(m, src)
+}
+func (m *ScopeRestriction) XXX_Size() int {
+ return m.Size()
+}
+func (m *ScopeRestriction) XXX_DiscardUnknown() {
+ xxx_messageInfo_ScopeRestriction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ScopeRestriction proto.InternalMessageInfo
+
+func (m *UserOAuthAccessToken) Reset() { *m = UserOAuthAccessToken{} }
+func (*UserOAuthAccessToken) ProtoMessage() {}
+func (*UserOAuthAccessToken) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{12}
+}
+func (m *UserOAuthAccessToken) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserOAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UserOAuthAccessToken) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserOAuthAccessToken.Merge(m, src)
+}
+func (m *UserOAuthAccessToken) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserOAuthAccessToken) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserOAuthAccessToken.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserOAuthAccessToken proto.InternalMessageInfo
+
+func (m *UserOAuthAccessTokenList) Reset() { *m = UserOAuthAccessTokenList{} }
+func (*UserOAuthAccessTokenList) ProtoMessage() {}
+func (*UserOAuthAccessTokenList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bd688dca7ea39c8a, []int{13}
+}
+func (m *UserOAuthAccessTokenList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserOAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UserOAuthAccessTokenList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserOAuthAccessTokenList.Merge(m, src)
+}
+func (m *UserOAuthAccessTokenList) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserOAuthAccessTokenList) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserOAuthAccessTokenList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserOAuthAccessTokenList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ClusterRoleScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ClusterRoleScopeRestriction")
+ proto.RegisterType((*OAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessToken")
+ proto.RegisterType((*OAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessTokenList")
+ proto.RegisterType((*OAuthAuthorizeToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeToken")
+ proto.RegisterType((*OAuthAuthorizeTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeTokenList")
+ proto.RegisterType((*OAuthClient)(nil), "github.com.openshift.api.oauth.v1.OAuthClient")
+ proto.RegisterType((*OAuthClientAuthorization)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorization")
+ proto.RegisterType((*OAuthClientAuthorizationList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorizationList")
+ proto.RegisterType((*OAuthClientList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientList")
+ proto.RegisterType((*OAuthRedirectReference)(nil), "github.com.openshift.api.oauth.v1.OAuthRedirectReference")
+ proto.RegisterType((*RedirectReference)(nil), "github.com.openshift.api.oauth.v1.RedirectReference")
+ proto.RegisterType((*ScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ScopeRestriction")
+ proto.RegisterType((*UserOAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessToken")
+ proto.RegisterType((*UserOAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessTokenList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/oauth/v1/generated.proto", fileDescriptor_bd688dca7ea39c8a)
+}
+
+var fileDescriptor_bd688dca7ea39c8a = []byte{
+ // 1272 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5,
+ 0x17, 0xcf, 0x36, 0x76, 0x62, 0x3f, 0x37, 0xbf, 0x26, 0x4d, 0xbb, 0xdf, 0xb6, 0x5f, 0xdb, 0x75,
+ 0x24, 0x1a, 0x04, 0xac, 0x49, 0x28, 0xa5, 0x52, 0xa5, 0x4a, 0x76, 0xa8, 0x4a, 0x04, 0x69, 0xa5,
+ 0x49, 0x03, 0x15, 0xf4, 0xd0, 0xe9, 0xee, 0x8b, 0x3d, 0x64, 0xbd, 0xbb, 0xec, 0x8c, 0x43, 0x83,
+ 0x7a, 0xe0, 0xc2, 0x9d, 0x7f, 0x84, 0x0b, 0x77, 0x0e, 0x48, 0x1c, 0x7a, 0x42, 0x3d, 0x20, 0xd4,
+ 0x93, 0x45, 0x8c, 0x38, 0xf0, 0x2f, 0x70, 0x42, 0x3b, 0xbb, 0xde, 0x1f, 0x8e, 0x4d, 0xdc, 0x03,
+ 0x11, 0x87, 0xde, 0xbc, 0xef, 0x7d, 0x3e, 0x6f, 0xde, 0xcc, 0xbc, 0xcf, 0x9b, 0x19, 0xc3, 0x7a,
+ 0x8b, 0xcb, 0x76, 0xf7, 0xb1, 0x61, 0xba, 0x9d, 0xba, 0xeb, 0xa1, 0x23, 0xda, 0x7c, 0x4f, 0xd6,
+ 0x99, 0xc7, 0xeb, 0x2e, 0xeb, 0xca, 0x76, 0xfd, 0x60, 0xbd, 0xde, 0x42, 0x07, 0x7d, 0x26, 0xd1,
+ 0x32, 0x3c, 0xdf, 0x95, 0x2e, 0xb9, 0x92, 0x50, 0x8c, 0x98, 0x62, 0x30, 0x8f, 0x1b, 0x8a, 0x62,
+ 0x1c, 0xac, 0x5f, 0x7c, 0x2b, 0x15, 0xb5, 0xe5, 0xb6, 0xdc, 0xba, 0x62, 0x3e, 0xee, 0xee, 0xa9,
+ 0x2f, 0xf5, 0xa1, 0x7e, 0x85, 0x11, 0x2f, 0x5e, 0xdb, 0xbf, 0x21, 0x0c, 0xee, 0x06, 0xc3, 0x76,
+ 0x98, 0xd9, 0xe6, 0x0e, 0xfa, 0x87, 0x75, 0x6f, 0xbf, 0x15, 0x18, 0x44, 0xbd, 0x83, 0x92, 0x8d,
+ 0xc8, 0xe3, 0xe2, 0xf5, 0x71, 0x2c, 0xbf, 0xeb, 0x48, 0xde, 0xc1, 0xba, 0x30, 0xdb, 0xd8, 0x61,
+ 0xc3, 0xbc, 0xda, 0x0f, 0x1a, 0x5c, 0xda, 0xb4, 0xbb, 0x42, 0xa2, 0x4f, 0x5d, 0x1b, 0x77, 0x4c,
+ 0xd7, 0x43, 0x8a, 0x42, 0xfa, 0xdc, 0x94, 0xdc, 0x75, 0xc8, 0x1b, 0x50, 0xf4, 0x5d, 0x1b, 0xef,
+ 0xb2, 0x0e, 0x0a, 0x5d, 0xab, 0x4e, 0xaf, 0x15, 0x9b, 0x73, 0xfd, 0x5e, 0xa5, 0x48, 0x07, 0x46,
+ 0x9a, 0xf8, 0x89, 0x01, 0xe0, 0x04, 0x3f, 0x3c, 0x66, 0xa2, 0xd0, 0xcf, 0x28, 0xf4, 0x7c, 0xbf,
+ 0x57, 0x81, 0xbb, 0xb1, 0x95, 0xa6, 0x10, 0xa4, 0x01, 0x0b, 0xcc, 0xb6, 0xdd, 0x2f, 0x6f, 0x0b,
+ 0x93, 0xd9, 0x2c, 0x18, 0x4f, 0x9f, 0xae, 0x6a, 0x6b, 0x85, 0xe6, 0x85, 0x67, 0xbd, 0xca, 0x54,
+ 0xbf, 0x57, 0x59, 0x68, 0x64, 0xdd, 0x74, 0x18, 0x5f, 0xfb, 0x23, 0x07, 0x8b, 0xf7, 0x1a, 0x5d,
+ 0xd9, 0x6e, 0x98, 0x26, 0x0a, 0x71, 0xdf, 0xdd, 0x47, 0x87, 0x3c, 0x82, 0x42, 0xb0, 0x4e, 0x16,
+ 0x93, 0x4c, 0xd7, 0xaa, 0xda, 0x5a, 0x69, 0xe3, 0x6d, 0x23, 0x5c, 0x1f, 0x23, 0xbd, 0x3e, 0x86,
+ 0xb7, 0xdf, 0x0a, 0x0c, 0xc2, 0x08, 0xd0, 0xc6, 0xc1, 0xba, 0x71, 0xef, 0xf1, 0xe7, 0x68, 0xca,
+ 0x6d, 0x94, 0xac, 0x49, 0xa2, 0x14, 0x20, 0xb1, 0xd1, 0x38, 0x2a, 0xd9, 0x00, 0x30, 0x6d, 0x8e,
+ 0x8e, 0x0c, 0x66, 0xa6, 0x9f, 0xa9, 0x6a, 0x6b, 0xc5, 0x84, 0xb1, 0x19, 0x7b, 0x68, 0x0a, 0x45,
+ 0xea, 0x50, 0xc4, 0x27, 0x1e, 0xf7, 0x51, 0x6c, 0x85, 0xf3, 0x9c, 0x6e, 0x2e, 0x45, 0x94, 0xe2,
+ 0xed, 0x81, 0x83, 0x26, 0x18, 0x52, 0x83, 0x19, 0x11, 0xec, 0x87, 0xd0, 0x73, 0x6a, 0x29, 0xa1,
+ 0xdf, 0xab, 0xcc, 0xa8, 0x1d, 0x12, 0x34, 0xf2, 0x90, 0x77, 0xa1, 0xe4, 0xa3, 0xc5, 0x7d, 0x34,
+ 0xe5, 0x2e, 0xdd, 0xd2, 0xf3, 0x2a, 0x93, 0xe5, 0x28, 0x6c, 0x89, 0x26, 0x2e, 0x9a, 0xc6, 0x91,
+ 0x37, 0xa1, 0xd0, 0x15, 0xe8, 0xab, 0xec, 0x67, 0x14, 0x67, 0x31, 0xe2, 0x14, 0x76, 0x23, 0x3b,
+ 0x8d, 0x11, 0xe4, 0x75, 0x98, 0x0d, 0x7e, 0xef, 0x6e, 0xbd, 0xaf, 0xcf, 0x2a, 0xf0, 0x42, 0x04,
+ 0x9e, 0xdd, 0x0d, 0xcd, 0x74, 0xe0, 0x27, 0xb7, 0x60, 0x3e, 0xa8, 0x7b, 0xd7, 0xe7, 0x5f, 0xa1,
+ 0xda, 0x0c, 0xbd, 0xa0, 0x18, 0xe7, 0x23, 0xc6, 0x7c, 0x23, 0xe3, 0xa5, 0x43, 0x68, 0x72, 0x03,
+ 0xce, 0xfa, 0xb8, 0xe7, 0xa3, 0x68, 0x87, 0xec, 0xa2, 0x62, 0x9f, 0x8b, 0xd8, 0x67, 0x69, 0xca,
+ 0x47, 0x33, 0x48, 0xf2, 0x10, 0x74, 0xee, 0x30, 0x53, 0xf2, 0x03, 0x2e, 0x0f, 0xef, 0xf3, 0x0e,
+ 0xba, 0x5d, 0xb9, 0x83, 0xa6, 0xeb, 0x58, 0x42, 0x87, 0xaa, 0xb6, 0x96, 0x6f, 0x56, 0xa3, 0x28,
+ 0xfa, 0xd6, 0x18, 0x1c, 0x1d, 0x1b, 0xa1, 0xf6, 0xb3, 0x06, 0xe7, 0x86, 0xeb, 0xec, 0x23, 0x2e,
+ 0x24, 0x79, 0x78, 0xac, 0xd6, 0x8c, 0xc9, 0x6a, 0x2d, 0x60, 0xab, 0x4a, 0x8b, 0x57, 0x7e, 0x60,
+ 0x49, 0xd5, 0xd9, 0x03, 0xc8, 0x73, 0x89, 0x9d, 0x50, 0x4c, 0xa5, 0x8d, 0x77, 0x8c, 0x13, 0xdb,
+ 0x8d, 0x31, 0x9c, 0x65, 0x73, 0x2e, 0x8a, 0x9f, 0xdf, 0x0a, 0x22, 0xd1, 0x30, 0x60, 0xed, 0xc7,
+ 0x1c, 0x2c, 0x87, 0xd0, 0xec, 0x06, 0xbc, 0xd2, 0xce, 0x49, 0xda, 0x59, 0x85, 0xbc, 0x90, 0x4c,
+ 0x0e, 0x84, 0x13, 0x2f, 0xef, 0x4e, 0x60, 0xa4, 0xa1, 0x2f, 0x23, 0xb0, 0xd9, 0x97, 0x11, 0x58,
+ 0xe1, 0x04, 0x81, 0xdd, 0x84, 0x39, 0xd3, 0xb5, 0x70, 0xb3, 0xcd, 0x6c, 0x1b, 0x9d, 0x16, 0x46,
+ 0x0a, 0x59, 0x89, 0x08, 0x73, 0x9b, 0x69, 0x27, 0xcd, 0x62, 0xc9, 0x36, 0x2c, 0x67, 0x0c, 0xdb,
+ 0x28, 0xdb, 0xae, 0xa5, 0xe4, 0x51, 0x6c, 0x5e, 0x8a, 0x42, 0x2c, 0x6f, 0x1e, 0x87, 0xd0, 0x51,
+ 0xbc, 0xda, 0x2f, 0x1a, 0x5c, 0x18, 0x51, 0x43, 0xa7, 0xa0, 0x8b, 0xcf, 0xb2, 0xba, 0xb8, 0x3e,
+ 0xb1, 0x2e, 0x32, 0x89, 0x8e, 0x91, 0xc6, 0x37, 0x33, 0x50, 0x52, 0xe8, 0xb0, 0x18, 0x4f, 0x41,
+ 0x12, 0xaf, 0xc1, 0x8c, 0x40, 0xd3, 0x47, 0x19, 0xc9, 0x61, 0x3e, 0x42, 0xcf, 0xec, 0x28, 0x2b,
+ 0x8d, 0xbc, 0x64, 0x13, 0x96, 0x98, 0x65, 0xf1, 0xe0, 0xe4, 0x63, 0x76, 0xe8, 0x13, 0xfa, 0xb4,
+ 0x2a, 0xf0, 0x95, 0x7e, 0xaf, 0xb2, 0xd4, 0x18, 0x76, 0xd2, 0xe3, 0x78, 0xb2, 0x03, 0x2b, 0x3e,
+ 0x0a, 0xcf, 0x75, 0xac, 0x4f, 0xb8, 0x6c, 0xc7, 0x7b, 0x1a, 0x28, 0x25, 0x38, 0x7b, 0xff, 0x1f,
+ 0x8d, 0xbd, 0x42, 0x47, 0x81, 0xe8, 0x68, 0x2e, 0xb9, 0x16, 0xf4, 0xed, 0x58, 0x23, 0x42, 0xcf,
+ 0xab, 0xa4, 0x16, 0xc3, 0x9e, 0x9d, 0xd8, 0x69, 0x06, 0x45, 0xb6, 0xa0, 0xd4, 0xf2, 0x99, 0x23,
+ 0xa3, 0x3a, 0x0c, 0x05, 0x75, 0x75, 0xa0, 0xc0, 0x3b, 0x89, 0xeb, 0xaf, 0x5e, 0x65, 0x51, 0x7d,
+ 0x7e, 0xc0, 0x1c, 0xcb, 0x46, 0xff, 0xfe, 0xa1, 0x87, 0x34, 0xcd, 0x25, 0x4f, 0x61, 0x49, 0x0c,
+ 0x5d, 0x5e, 0x84, 0x3e, 0x3b, 0x71, 0xd7, 0x1c, 0xbe, 0xf8, 0x34, 0xff, 0x17, 0x65, 0xb1, 0x34,
+ 0xec, 0x11, 0xf4, 0xf8, 0x40, 0xe4, 0x01, 0xe8, 0x2c, 0x69, 0xb9, 0xdb, 0xec, 0x49, 0xa3, 0x85,
+ 0x83, 0xc3, 0xa7, 0xa0, 0x0e, 0x9f, 0xcb, 0xc1, 0xc1, 0xd3, 0x18, 0x83, 0xa1, 0x63, 0xd9, 0xe4,
+ 0x10, 0x56, 0x53, 0xbe, 0x71, 0x27, 0x97, 0xea, 0x02, 0xf9, 0xe6, 0xd5, 0x7e, 0xaf, 0xb2, 0xda,
+ 0x38, 0x19, 0x4e, 0x27, 0x89, 0x59, 0xfb, 0xee, 0x0c, 0xe8, 0x29, 0x1d, 0x0c, 0xb4, 0xa3, 0x2e,
+ 0x5e, 0xff, 0xd1, 0x73, 0x22, 0xdd, 0x76, 0xa7, 0x5f, 0xa6, 0xed, 0xe6, 0x4e, 0x68, 0xbb, 0xc9,
+ 0x79, 0x92, 0x1f, 0x77, 0x9e, 0xd4, 0x7a, 0x1a, 0x5c, 0x1e, 0xb7, 0x5e, 0xa7, 0xd0, 0x13, 0x1f,
+ 0x65, 0x7b, 0xe2, 0xcd, 0x49, 0x7b, 0xe2, 0x88, 0x6c, 0xc7, 0x34, 0xc6, 0x9f, 0x34, 0x58, 0x48,
+ 0x51, 0x4e, 0x61, 0x4e, 0x3b, 0xd9, 0x39, 0x19, 0x2f, 0x37, 0xa7, 0x31, 0xd3, 0x38, 0xd2, 0xe0,
+ 0xbc, 0x42, 0x0d, 0x3a, 0x13, 0xc5, 0x3d, 0xf4, 0xd1, 0x31, 0xf1, 0x14, 0xaa, 0x1a, 0xa1, 0xe8,
+ 0x0f, 0x86, 0x53, 0x45, 0x5d, 0xda, 0xb8, 0x36, 0xc1, 0xac, 0x8e, 0xa5, 0x9a, 0xdc, 0x7f, 0x62,
+ 0x13, 0x4d, 0x22, 0xd7, 0x9e, 0xc2, 0xd2, 0xf1, 0xd9, 0xad, 0x42, 0xbe, 0xe5, 0xbb, 0x5d, 0x4f,
+ 0x4d, 0x2d, 0x75, 0x73, 0xb9, 0x13, 0x18, 0x69, 0xe8, 0x23, 0x55, 0xc8, 0xed, 0x73, 0xc7, 0x8a,
+ 0x04, 0x77, 0x36, 0xc2, 0xe4, 0x3e, 0xe4, 0x8e, 0x45, 0x95, 0x27, 0x40, 0x38, 0x89, 0xc0, 0x62,
+ 0x84, 0x12, 0x97, 0xf2, 0xd4, 0xbe, 0xd7, 0x60, 0x71, 0xc4, 0x53, 0xb2, 0x60, 0x73, 0x89, 0x3e,
+ 0xb3, 0x07, 0x2f, 0xc9, 0x85, 0xa0, 0xcb, 0xdf, 0x7e, 0xc2, 0x4c, 0xf9, 0x31, 0xb3, 0xbb, 0x28,
+ 0x68, 0x0c, 0x20, 0x5f, 0x40, 0xc9, 0x4c, 0x9e, 0xa5, 0xd1, 0x42, 0xdd, 0x9a, 0x60, 0xa1, 0xfe,
+ 0xe1, 0x31, 0x1b, 0x8e, 0x97, 0x02, 0xd0, 0xf4, 0x18, 0xb5, 0x3f, 0x73, 0x70, 0x2e, 0xd0, 0xfd,
+ 0xab, 0xe7, 0xe4, 0xab, 0xe7, 0xe4, 0xbf, 0xfd, 0x9c, 0xfc, 0x55, 0x03, 0x7d, 0x54, 0xad, 0x9d,
+ 0x42, 0x4b, 0x7d, 0x98, 0x6d, 0xa9, 0xef, 0x4d, 0xa0, 0xa9, 0x51, 0x99, 0x8e, 0xee, 0xad, 0xcd,
+ 0x3b, 0xcf, 0x8e, 0xca, 0x53, 0xcf, 0x8f, 0xca, 0x53, 0x2f, 0x8e, 0xca, 0x53, 0x5f, 0xf7, 0xcb,
+ 0xda, 0xb3, 0x7e, 0x59, 0x7b, 0xde, 0x2f, 0x6b, 0x2f, 0xfa, 0x65, 0xed, 0xb7, 0x7e, 0x59, 0xfb,
+ 0xf6, 0xf7, 0xf2, 0xd4, 0xa7, 0x57, 0x4e, 0xfc, 0xa3, 0xed, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xc6, 0xcf, 0x36, 0xd6, 0x8c, 0x13, 0x00, 0x00,
+}
+
+func (m *ClusterRoleScopeRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterRoleScopeRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.AllowEscalation {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ if len(m.Namespaces) > 0 {
+ for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Namespaces[iNdEx])
+ copy(dAtA[i:], m.Namespaces[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.RoleNames) > 0 {
+ for iNdEx := len(m.RoleNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.RoleNames[iNdEx])
+ copy(dAtA[i:], m.RoleNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleNames[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthAccessToken) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthAccessToken) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds))
+ i--
+ dAtA[i] = 0x50
+ i -= len(m.RefreshToken)
+ copy(dAtA[i:], m.RefreshToken)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken)))
+ i--
+ dAtA[i] = 0x4a
+ i -= len(m.AuthorizeToken)
+ copy(dAtA[i:], m.AuthorizeToken)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken)))
+ i--
+ dAtA[i] = 0x42
+ i -= len(m.UserUID)
+ copy(dAtA[i:], m.UserUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.UserName)
+ copy(dAtA[i:], m.UserName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.RedirectURI)
+ copy(dAtA[i:], m.RedirectURI)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Scopes) > 0 {
+ for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Scopes[iNdEx])
+ copy(dAtA[i:], m.Scopes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.ClientName)
+ copy(dAtA[i:], m.ClientName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthAccessTokenList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthAuthorizeToken) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthAuthorizeToken) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthAuthorizeToken) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.CodeChallengeMethod)
+ copy(dAtA[i:], m.CodeChallengeMethod)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallengeMethod)))
+ i--
+ dAtA[i] = 0x52
+ i -= len(m.CodeChallenge)
+ copy(dAtA[i:], m.CodeChallenge)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallenge)))
+ i--
+ dAtA[i] = 0x4a
+ i -= len(m.UserUID)
+ copy(dAtA[i:], m.UserUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID)))
+ i--
+ dAtA[i] = 0x42
+ i -= len(m.UserName)
+ copy(dAtA[i:], m.UserName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.State)
+ copy(dAtA[i:], m.State)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.State)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.RedirectURI)
+ copy(dAtA[i:], m.RedirectURI)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Scopes) > 0 {
+ for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Scopes[iNdEx])
+ copy(dAtA[i:], m.Scopes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.ClientName)
+ copy(dAtA[i:], m.ClientName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthAuthorizeTokenList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthAuthorizeTokenList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthAuthorizeTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthClient) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthClient) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthClient) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AccessTokenInactivityTimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenInactivityTimeoutSeconds))
+ i--
+ dAtA[i] = 0x48
+ }
+ if m.AccessTokenMaxAgeSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenMaxAgeSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
+ if len(m.ScopeRestrictions) > 0 {
+ for iNdEx := len(m.ScopeRestrictions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ScopeRestrictions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i -= len(m.GrantMethod)
+ copy(dAtA[i:], m.GrantMethod)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.GrantMethod)))
+ i--
+ dAtA[i] = 0x32
+ if len(m.RedirectURIs) > 0 {
+ for iNdEx := len(m.RedirectURIs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.RedirectURIs[iNdEx])
+ copy(dAtA[i:], m.RedirectURIs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURIs[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ i--
+ if m.RespondWithChallenges {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ if len(m.AdditionalSecrets) > 0 {
+ for iNdEx := len(m.AdditionalSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AdditionalSecrets[iNdEx])
+ copy(dAtA[i:], m.AdditionalSecrets[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalSecrets[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Secret)
+ copy(dAtA[i:], m.Secret)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthClientAuthorization) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthClientAuthorization) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthClientAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Scopes) > 0 {
+ for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Scopes[iNdEx])
+ copy(dAtA[i:], m.Scopes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ i -= len(m.UserUID)
+ copy(dAtA[i:], m.UserUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.UserName)
+ copy(dAtA[i:], m.UserName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.ClientName)
+ copy(dAtA[i:], m.ClientName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthClientAuthorizationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthClientAuthorizationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthClientAuthorizationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthClientList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthClientList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthClientList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *OAuthRedirectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OAuthRedirectReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OAuthRedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *RedirectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RedirectReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopeRestriction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopeRestriction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterRole != nil {
+ {
+ size, err := m.ClusterRole.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ExactValues) > 0 {
+ for iNdEx := len(m.ExactValues) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ExactValues[iNdEx])
+ copy(dAtA[i:], m.ExactValues[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExactValues[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UserOAuthAccessToken) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserOAuthAccessToken) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserOAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds))
+ i--
+ dAtA[i] = 0x50
+ i -= len(m.RefreshToken)
+ copy(dAtA[i:], m.RefreshToken)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken)))
+ i--
+ dAtA[i] = 0x4a
+ i -= len(m.AuthorizeToken)
+ copy(dAtA[i:], m.AuthorizeToken)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken)))
+ i--
+ dAtA[i] = 0x42
+ i -= len(m.UserUID)
+ copy(dAtA[i:], m.UserUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.UserName)
+ copy(dAtA[i:], m.UserName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.RedirectURI)
+ copy(dAtA[i:], m.RedirectURI)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI)))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Scopes) > 0 {
+ for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Scopes[iNdEx])
+ copy(dAtA[i:], m.Scopes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn))
+ i--
+ dAtA[i] = 0x18
+ i -= len(m.ClientName)
+ copy(dAtA[i:], m.ClientName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *UserOAuthAccessTokenList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserOAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserOAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ClusterRoleScopeRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.RoleNames) > 0 {
+ for _, s := range m.RoleNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Namespaces) > 0 {
+ for _, s := range m.Namespaces {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ return n
+}
+
+func (m *OAuthAccessToken) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ClientName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ExpiresIn))
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RedirectURI)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.AuthorizeToken)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.RefreshToken)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds))
+ return n
+}
+
+func (m *OAuthAccessTokenList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *OAuthAuthorizeToken) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ClientName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ExpiresIn))
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RedirectURI)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.State)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CodeChallenge)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.CodeChallengeMethod)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *OAuthAuthorizeTokenList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *OAuthClient) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Secret)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AdditionalSecrets) > 0 {
+ for _, s := range m.AdditionalSecrets {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ n += 2
+ if len(m.RedirectURIs) > 0 {
+ for _, s := range m.RedirectURIs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.GrantMethod)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ScopeRestrictions) > 0 {
+ for _, e := range m.ScopeRestrictions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.AccessTokenMaxAgeSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.AccessTokenMaxAgeSeconds))
+ }
+ if m.AccessTokenInactivityTimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.AccessTokenInactivityTimeoutSeconds))
+ }
+ return n
+}
+
+func (m *OAuthClientAuthorization) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ClientName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *OAuthClientAuthorizationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *OAuthClientList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *OAuthRedirectReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Reference.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *RedirectReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ScopeRestriction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExactValues) > 0 {
+ for _, s := range m.ExactValues {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.ClusterRole != nil {
+ l = m.ClusterRole.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *UserOAuthAccessToken) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ClientName)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.ExpiresIn))
+ if len(m.Scopes) > 0 {
+ for _, s := range m.Scopes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.RedirectURI)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UserUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.AuthorizeToken)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.RefreshToken)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds))
+ return n
+}
+
+func (m *UserOAuthAccessTokenList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ClusterRoleScopeRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterRoleScopeRestriction{`,
+ `RoleNames:` + fmt.Sprintf("%v", this.RoleNames) + `,`,
+ `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`,
+ `AllowEscalation:` + fmt.Sprintf("%v", this.AllowEscalation) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthAccessToken) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OAuthAccessToken{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`,
+ `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`,
+ `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+ `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`,
+ `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`,
+ `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`,
+ `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`,
+ `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`,
+ `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthAccessTokenList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]OAuthAccessToken{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAccessToken", "OAuthAccessToken", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&OAuthAccessTokenList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthAuthorizeToken) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OAuthAuthorizeToken{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`,
+ `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`,
+ `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+ `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`,
+ `State:` + fmt.Sprintf("%v", this.State) + `,`,
+ `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`,
+ `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`,
+ `CodeChallenge:` + fmt.Sprintf("%v", this.CodeChallenge) + `,`,
+ `CodeChallengeMethod:` + fmt.Sprintf("%v", this.CodeChallengeMethod) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthAuthorizeTokenList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]OAuthAuthorizeToken{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAuthorizeToken", "OAuthAuthorizeToken", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&OAuthAuthorizeTokenList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthClient) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForScopeRestrictions := "[]ScopeRestriction{"
+ for _, f := range this.ScopeRestrictions {
+ repeatedStringForScopeRestrictions += strings.Replace(strings.Replace(f.String(), "ScopeRestriction", "ScopeRestriction", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForScopeRestrictions += "}"
+ s := strings.Join([]string{`&OAuthClient{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
+ `AdditionalSecrets:` + fmt.Sprintf("%v", this.AdditionalSecrets) + `,`,
+ `RespondWithChallenges:` + fmt.Sprintf("%v", this.RespondWithChallenges) + `,`,
+ `RedirectURIs:` + fmt.Sprintf("%v", this.RedirectURIs) + `,`,
+ `GrantMethod:` + fmt.Sprintf("%v", this.GrantMethod) + `,`,
+ `ScopeRestrictions:` + repeatedStringForScopeRestrictions + `,`,
+ `AccessTokenMaxAgeSeconds:` + valueToStringGenerated(this.AccessTokenMaxAgeSeconds) + `,`,
+ `AccessTokenInactivityTimeoutSeconds:` + valueToStringGenerated(this.AccessTokenInactivityTimeoutSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthClientAuthorization) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OAuthClientAuthorization{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`,
+ `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`,
+ `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`,
+ `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthClientAuthorizationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]OAuthClientAuthorization{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClientAuthorization", "OAuthClientAuthorization", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&OAuthClientAuthorizationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthClientList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]OAuthClient{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClient", "OAuthClient", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&OAuthClientList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *OAuthRedirectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&OAuthRedirectReference{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "RedirectReference", "RedirectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *RedirectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&RedirectReference{`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ScopeRestriction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ScopeRestriction{`,
+ `ExactValues:` + fmt.Sprintf("%v", this.ExactValues) + `,`,
+ `ClusterRole:` + strings.Replace(this.ClusterRole.String(), "ClusterRoleScopeRestriction", "ClusterRoleScopeRestriction", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UserOAuthAccessToken) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UserOAuthAccessToken{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`,
+ `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`,
+ `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+ `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`,
+ `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`,
+ `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`,
+ `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`,
+ `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`,
+ `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UserOAuthAccessTokenList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]UserOAuthAccessToken{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "UserOAuthAccessToken", "UserOAuthAccessToken", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&UserOAuthAccessTokenList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ClusterRoleScopeRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterRoleScopeRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterRoleScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RoleNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RoleNames = append(m.RoleNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowEscalation", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowEscalation = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthAccessToken) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthAccessToken: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType)
+ }
+ m.ExpiresIn = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ExpiresIn |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RedirectURI = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserUID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AuthorizeToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RefreshToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType)
+ }
+ m.InactivityTimeoutSeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthAccessTokenList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthAccessTokenList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, OAuthAccessToken{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthAuthorizeToken) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthAuthorizeToken: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthAuthorizeToken: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType)
+ }
+ m.ExpiresIn = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ExpiresIn |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RedirectURI = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.State = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserUID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CodeChallenge", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CodeChallenge = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CodeChallengeMethod", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CodeChallengeMethod = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthAuthorizeTokenList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthAuthorizeTokenList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthAuthorizeTokenList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, OAuthAuthorizeToken{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthClient) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthClient: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthClient: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Secret = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdditionalSecrets", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdditionalSecrets = append(m.AdditionalSecrets, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RespondWithChallenges", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.RespondWithChallenges = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RedirectURIs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RedirectURIs = append(m.RedirectURIs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GrantMethod", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.GrantMethod = GrantHandlerType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ScopeRestrictions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ScopeRestrictions = append(m.ScopeRestrictions, ScopeRestriction{})
+ if err := m.ScopeRestrictions[len(m.ScopeRestrictions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenMaxAgeSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AccessTokenMaxAgeSeconds = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenInactivityTimeoutSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AccessTokenInactivityTimeoutSeconds = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthClientAuthorization) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthClientAuthorization: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthClientAuthorization: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserUID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthClientAuthorizationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthClientAuthorizationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthClientAuthorizationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, OAuthClientAuthorization{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthClientList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthClientList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthClientList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, OAuthClient{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OAuthRedirectReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OAuthRedirectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OAuthRedirectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RedirectReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RedirectReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RedirectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ScopeRestriction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ScopeRestriction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExactValues", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExactValues = append(m.ExactValues, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterRole == nil {
+ m.ClusterRole = &ClusterRoleScopeRestriction{}
+ }
+ if err := m.ClusterRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UserOAuthAccessToken) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserOAuthAccessToken: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserOAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType)
+ }
+ m.ExpiresIn = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ExpiresIn |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RedirectURI = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UserUID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AuthorizeToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RefreshToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType)
+ }
+ m.InactivityTimeoutSeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UserOAuthAccessTokenList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserOAuthAccessTokenList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserOAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, UserOAuthAccessToken{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto
new file mode 100644
index 0000000000..829025a83f
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto
@@ -0,0 +1,321 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.oauth.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/oauth/v1";
+
+// ClusterRoleScopeRestriction describes restrictions on cluster role scopes
+message ClusterRoleScopeRestriction {
+ // RoleNames is the list of cluster roles that can be referenced. * means anything
+ repeated string roleNames = 1;
+
+ // Namespaces is the list of namespaces that can be referenced. * means any of them (including *)
+ repeated string namespaces = 2;
+
+ // AllowEscalation indicates whether you can request roles and their escalating resources
+ optional bool allowEscalation = 3;
+}
+
+// OAuthAccessToken describes an OAuth access token.
+// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at
+// least 32 characters long.
+//
+// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded
+// base64-encoding (as described in RFC4648) on the hashed result.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthAccessToken {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // ClientName references the client that created this token.
+ optional string clientName = 2;
+
+ // ExpiresIn is the seconds from CreationTime before this token expires.
+ optional int64 expiresIn = 3;
+
+ // Scopes is an array of the requested scopes.
+ repeated string scopes = 4;
+
+ // RedirectURI is the redirection associated with the token.
+ optional string redirectURI = 5;
+
+ // UserName is the user name associated with this token
+ optional string userName = 6;
+
+ // UserUID is the unique UID associated with this token
+ optional string userUID = 7;
+
+ // AuthorizeToken contains the token that authorized this token
+ optional string authorizeToken = 8;
+
+ // RefreshToken is the value by which this token can be renewed. Can be blank.
+ optional string refreshToken = 9;
+
+ // InactivityTimeoutSeconds is the value in seconds, from the
+ // CreationTimestamp, after which this token can no longer be used.
+ // The value is automatically incremented when the token is used.
+ optional int32 inactivityTimeoutSeconds = 10;
+}
+
+// OAuthAccessTokenList is a collection of OAuth access tokens
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthAccessTokenList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of OAuth access tokens
+ repeated OAuthAccessToken items = 2;
+}
+
+// OAuthAuthorizeToken describes an OAuth authorization token
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthAuthorizeToken {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // ClientName references the client that created this token.
+ optional string clientName = 2;
+
+ // ExpiresIn is the seconds from CreationTime before this token expires.
+ optional int64 expiresIn = 3;
+
+ // Scopes is an array of the requested scopes.
+ repeated string scopes = 4;
+
+ // RedirectURI is the redirection associated with the token.
+ optional string redirectURI = 5;
+
+ // State data from request
+ optional string state = 6;
+
+ // UserName is the user name associated with this token
+ optional string userName = 7;
+
+ // UserUID is the unique UID associated with this token. UserUID and UserName must both match
+ // for this token to be valid.
+ optional string userUID = 8;
+
+ // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636
+ optional string codeChallenge = 9;
+
+ // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636
+ optional string codeChallengeMethod = 10;
+}
+
+// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthAuthorizeTokenList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of OAuth authorization tokens
+ repeated OAuthAuthorizeToken items = 2;
+}
+
+// OAuthClient describes an OAuth client
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthClient {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Secret is the unique secret associated with a client
+ optional string secret = 2;
+
+ // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation
+ // and for service account token validation
+ repeated string additionalSecrets = 3;
+
+ // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects
+ optional bool respondWithChallenges = 4;
+
+ // RedirectURIs is the valid redirection URIs associated with a client
+ // +patchStrategy=merge
+ repeated string redirectURIs = 5;
+
+ // GrantMethod is a required field which determines how to handle grants for this client.
+ // Valid grant handling methods are:
+ // - auto: always approves grant requests, useful for trusted clients
+ // - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+ optional string grantMethod = 6;
+
+ // ScopeRestrictions describes which scopes this client can request. Each requested scope
+ // is checked against each restriction. If any restriction matches, then the scope is allowed.
+ // If no restriction matches, then the scope is denied.
+ repeated ScopeRestriction scopeRestrictions = 7;
+
+ // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client.
+ // 0 means no expiration.
+ optional int32 accessTokenMaxAgeSeconds = 8;
+
+ // AccessTokenInactivityTimeoutSeconds overrides the default token
+ // inactivity timeout for tokens granted to this client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out.
+ // This value needs to be set only if the default set in configuration is
+ // not appropriate for this client. Valid values are:
+ // - 0: Tokens for this client never time out
+ // - X: Tokens time out if there is no activity for X seconds
+ // The current minimum allowed value for X is 300 (5 minutes)
+ //
+ // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
+ optional int32 accessTokenInactivityTimeoutSeconds = 9;
+}
+
+// OAuthClientAuthorization describes an authorization created by an OAuth client
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthClientAuthorization {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // ClientName references the client that created this authorization
+ optional string clientName = 2;
+
+ // UserName is the user name that authorized this client
+ optional string userName = 3;
+
+ // UserUID is the unique UID associated with this authorization. UserUID and UserName
+ // must both match for this authorization to be valid.
+ optional string userUID = 4;
+
+ // Scopes is an array of the granted scopes.
+ repeated string scopes = 5;
+}
+
+// OAuthClientAuthorizationList is a collection of OAuth client authorizations
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthClientAuthorizationList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of OAuth client authorizations
+ repeated OAuthClientAuthorization items = 2;
+}
+
+// OAuthClientList is a collection of OAuth clients
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthClientList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of OAuth clients
+ repeated OAuthClient items = 2;
+}
+
+// OAuthRedirectReference is a reference to an OAuth redirect object.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message OAuthRedirectReference {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // The reference to a redirect object in the current namespace.
+ optional RedirectReference reference = 2;
+}
+
+// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.
+message RedirectReference {
+ // The group of the target that is being referred to.
+ optional string group = 1;
+
+ // The kind of the target that is being referred to. Currently, only 'Route' is allowed.
+ optional string kind = 2;
+
+ // The name of the target that is being referred to. e.g. name of the Route.
+ optional string name = 3;
+}
+
+// ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.
+message ScopeRestriction {
+ // ExactValues means the scope has to match a particular set of strings exactly
+ repeated string literals = 1;
+
+ // ClusterRole describes a set of restrictions for cluster role scoping.
+ optional ClusterRoleScopeRestriction clusterRole = 2;
+}
+
+// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to
+// the user the access token was issued for
+// +openshift:compatibility-gen:level=1
+message UserOAuthAccessToken {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // ClientName references the client that created this token.
+ optional string clientName = 2;
+
+ // ExpiresIn is the seconds from CreationTime before this token expires.
+ optional int64 expiresIn = 3;
+
+ // Scopes is an array of the requested scopes.
+ repeated string scopes = 4;
+
+ // RedirectURI is the redirection associated with the token.
+ optional string redirectURI = 5;
+
+ // UserName is the user name associated with this token
+ optional string userName = 6;
+
+ // UserUID is the unique UID associated with this token
+ optional string userUID = 7;
+
+ // AuthorizeToken contains the token that authorized this token
+ optional string authorizeToken = 8;
+
+ // RefreshToken is the value by which this token can be renewed. Can be blank.
+ optional string refreshToken = 9;
+
+ // InactivityTimeoutSeconds is the value in seconds, from the
+ // CreationTimestamp, after which this token can no longer be used.
+ // The value is automatically incremented when the token is used.
+ optional int32 inactivityTimeoutSeconds = 10;
+}
+
+// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of
+// the requesting user
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message UserOAuthAccessTokenList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ repeated UserOAuthAccessToken items = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/oauth/v1/legacy.go b/vendor/github.com/openshift/api/oauth/v1/legacy.go
new file mode 100644
index 0000000000..65b57d2431
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/legacy.go
@@ -0,0 +1,30 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &OAuthAccessToken{},
+ &OAuthAccessTokenList{},
+ &OAuthAuthorizeToken{},
+ &OAuthAuthorizeTokenList{},
+ &OAuthClient{},
+ &OAuthClientList{},
+ &OAuthClientAuthorization{},
+ &OAuthClientAuthorizationList{},
+ &OAuthRedirectReference{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/register.go b/vendor/github.com/openshift/api/oauth/v1/register.go
new file mode 100644
index 0000000000..9992dffea9
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/register.go
@@ -0,0 +1,47 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "oauth.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &OAuthAccessToken{},
+ &OAuthAccessTokenList{},
+ &OAuthAuthorizeToken{},
+ &OAuthAuthorizeTokenList{},
+ &OAuthClient{},
+ &OAuthClientList{},
+ &OAuthClientAuthorization{},
+ &OAuthClientAuthorizationList{},
+ &OAuthRedirectReference{},
+ &UserOAuthAccessToken{},
+ &UserOAuthAccessTokenList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go
new file mode 100644
index 0000000000..026c527f5b
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/types.go
@@ -0,0 +1,341 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAccessToken describes an OAuth access token.
+// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at
+// least 32 characters long.
+//
+// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded
+// base64-encoding (as described in RFC4648) on the hashed result.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthAccessToken struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // ClientName references the client that created this token.
+ ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+ // ExpiresIn is the seconds from CreationTime before this token expires.
+ ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"`
+
+ // Scopes is an array of the requested scopes.
+ Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"`
+
+ // RedirectURI is the redirection associated with the token.
+ RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"`
+
+ // UserName is the user name associated with this token
+ UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"`
+
+ // UserUID is the unique UID associated with this token
+ UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"`
+
+ // AuthorizeToken contains the token that authorized this token
+ AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"`
+
+ // RefreshToken is the value by which this token can be renewed. Can be blank.
+ RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"`
+
+ // InactivityTimeoutSeconds is the value in seconds, from the
+ // CreationTimestamp, after which this token can no longer be used.
+ // The value is automatically incremented when the token is used.
+ InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAuthorizeToken describes an OAuth authorization token
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthAuthorizeToken struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // ClientName references the client that created this token.
+ ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+ // ExpiresIn is the seconds from CreationTime before this token expires.
+ ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"`
+
+ // Scopes is an array of the requested scopes.
+ Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"`
+
+ // RedirectURI is the redirection associated with the token.
+ RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"`
+
+ // State data from request
+ State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"`
+
+ // UserName is the user name associated with this token
+ UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"`
+
+ // UserUID is the unique UID associated with this token. UserUID and UserName must both match
+ // for this token to be valid.
+ UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"`
+
+ // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636
+ CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"`
+
+ // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636
+ CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClient describes an OAuth client
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthClient struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Secret is the unique secret associated with a client
+ Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+ // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation
+ // and for service account token validation
+ AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"`
+
+ // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects
+ RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"`
+
+ // RedirectURIs is the valid redirection URIs associated with a client
+ // +patchStrategy=merge
+ RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"`
+
+ // GrantMethod is a required field which determines how to handle grants for this client.
+ // Valid grant handling methods are:
+ // - auto: always approves grant requests, useful for trusted clients
+ // - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+ GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"`
+
+ // ScopeRestrictions describes which scopes this client can request. Each requested scope
+ // is checked against each restriction. If any restriction matches, then the scope is allowed.
+ // If no restriction matches, then the scope is denied.
+ ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"`
+
+ // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client.
+ // 0 means no expiration.
+ AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"`
+
+ // AccessTokenInactivityTimeoutSeconds overrides the default token
+ // inactivity timeout for tokens granted to this client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out.
+ // This value needs to be set only if the default set in configuration is
+ // not appropriate for this client. Valid values are:
+ // - 0: Tokens for this client never time out
+ // - X: Tokens time out if there is no activity for X seconds
+ // The current minimum allowed value for X is 300 (5 minutes)
+ //
+ // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value
+ AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty" protobuf:"varint,9,opt,name=accessTokenInactivityTimeoutSeconds"`
+}
+
+type GrantHandlerType string
+
+const (
+ // GrantHandlerAuto auto-approves client authorization grant requests
+ GrantHandlerAuto GrantHandlerType = "auto"
+ // GrantHandlerPrompt prompts the user to approve new client authorization grant requests
+ GrantHandlerPrompt GrantHandlerType = "prompt"
+ // GrantHandlerDeny auto-denies client authorization grant requests
+ GrantHandlerDeny GrantHandlerType = "deny"
+)
+
+// ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.
+type ScopeRestriction struct {
+ // ExactValues means the scope has to match a particular set of strings exactly
+ ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"`
+
+ // ClusterRole describes a set of restrictions for cluster role scoping.
+ ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"`
+}
+
+// ClusterRoleScopeRestriction describes restrictions on cluster role scopes
+type ClusterRoleScopeRestriction struct {
+ // RoleNames is the list of cluster roles that can referenced. * means anything
+ RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"`
+ // Namespaces is the list of namespaces that can be referenced. * means any of them (including *)
+ Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
+ // AllowEscalation indicates whether you can request roles and their escalating resources
+ AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientAuthorization describes an authorization created by an OAuth client
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthClientAuthorization struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // ClientName references the client that created this authorization
+ ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"`
+
+ // UserName is the user name that authorized this client
+ UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"`
+
+ // UserUID is the unique UID associated with this authorization. UserUID and UserName
+ // must both match for this authorization to be valid.
+ UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"`
+
+ // Scopes is an array of the granted scopes.
+ Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAccessTokenList is a collection of OAuth access tokens
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthAccessTokenList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of OAuth access tokens
+ Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthAuthorizeTokenList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of OAuth authorization tokens
+ Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientList is a collection of OAuth clients
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthClientList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of OAuth clients
+ Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthClientAuthorizationList is a collection of OAuth client authorizations
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthClientAuthorizationList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of OAuth client authorizations
+ Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuthRedirectReference is a reference to an OAuth redirect object.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OAuthRedirectReference struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The reference to an redirect object in the current namespace.
+ Reference RedirectReference `json:"reference,omitempty" protobuf:"bytes,2,opt,name=reference"`
+}
+
+// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.
+type RedirectReference struct {
+ // The group of the target that is being referred to.
+ Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+
+ // The kind of the target that is being referred to. Currently, only 'Route' is allowed.
+ Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+
+ // The name of the target that is being referred to. e.g. name of the Route.
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to
+// the user the access token was issued for
+// +openshift:compatibility-gen:level=1
+type UserOAuthAccessToken OAuthAccessToken
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of
+// the requesting user
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type UserOAuthAccessTokenList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ Items []UserOAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..f1af9dc5f0
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
@@ -0,0 +1,447 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterRoleScopeRestriction) DeepCopyInto(out *ClusterRoleScopeRestriction) {
+ *out = *in
+ if in.RoleNames != nil {
+ in, out := &in.RoleNames, &out.RoleNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Namespaces != nil {
+ in, out := &in.Namespaces, &out.Namespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleScopeRestriction.
+func (in *ClusterRoleScopeRestriction) DeepCopy() *ClusterRoleScopeRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterRoleScopeRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAccessToken) DeepCopyInto(out *OAuthAccessToken) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessToken.
+func (in *OAuthAccessToken) DeepCopy() *OAuthAccessToken {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthAccessToken)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthAccessToken) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAccessTokenList) DeepCopyInto(out *OAuthAccessTokenList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuthAccessToken, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessTokenList.
+func (in *OAuthAccessTokenList) DeepCopy() *OAuthAccessTokenList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthAccessTokenList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthAccessTokenList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAuthorizeToken) DeepCopyInto(out *OAuthAuthorizeToken) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeToken.
+func (in *OAuthAuthorizeToken) DeepCopy() *OAuthAuthorizeToken {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthAuthorizeToken)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthAuthorizeToken) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAuthorizeTokenList) DeepCopyInto(out *OAuthAuthorizeTokenList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuthAuthorizeToken, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeTokenList.
+func (in *OAuthAuthorizeTokenList) DeepCopy() *OAuthAuthorizeTokenList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthAuthorizeTokenList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthAuthorizeTokenList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthClient) DeepCopyInto(out *OAuthClient) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.AdditionalSecrets != nil {
+ in, out := &in.AdditionalSecrets, &out.AdditionalSecrets
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.RedirectURIs != nil {
+ in, out := &in.RedirectURIs, &out.RedirectURIs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ScopeRestrictions != nil {
+ in, out := &in.ScopeRestrictions, &out.ScopeRestrictions
+ *out = make([]ScopeRestriction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.AccessTokenMaxAgeSeconds != nil {
+ in, out := &in.AccessTokenMaxAgeSeconds, &out.AccessTokenMaxAgeSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.AccessTokenInactivityTimeoutSeconds != nil {
+ in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClient.
+func (in *OAuthClient) DeepCopy() *OAuthClient {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthClient)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthClient) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthClientAuthorization) DeepCopyInto(out *OAuthClientAuthorization) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorization.
+func (in *OAuthClientAuthorization) DeepCopy() *OAuthClientAuthorization {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthClientAuthorization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthClientAuthorization) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthClientAuthorizationList) DeepCopyInto(out *OAuthClientAuthorizationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuthClientAuthorization, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorizationList.
+func (in *OAuthClientAuthorizationList) DeepCopy() *OAuthClientAuthorizationList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthClientAuthorizationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthClientAuthorizationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthClientList) DeepCopyInto(out *OAuthClientList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OAuthClient, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientList.
+func (in *OAuthClientList) DeepCopy() *OAuthClientList {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthClientList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthClientList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthRedirectReference) DeepCopyInto(out *OAuthRedirectReference) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Reference = in.Reference
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRedirectReference.
+func (in *OAuthRedirectReference) DeepCopy() *OAuthRedirectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthRedirectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OAuthRedirectReference) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectReference) DeepCopyInto(out *RedirectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectReference.
+func (in *RedirectReference) DeepCopy() *RedirectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(RedirectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopeRestriction) DeepCopyInto(out *ScopeRestriction) {
+ *out = *in
+ if in.ExactValues != nil {
+ in, out := &in.ExactValues, &out.ExactValues
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ClusterRole != nil {
+ in, out := &in.ClusterRole, &out.ClusterRole
+ *out = new(ClusterRoleScopeRestriction)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeRestriction.
+func (in *ScopeRestriction) DeepCopy() *ScopeRestriction {
+ if in == nil {
+ return nil
+ }
+ out := new(ScopeRestriction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserOAuthAccessToken) DeepCopyInto(out *UserOAuthAccessToken) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Scopes != nil {
+ in, out := &in.Scopes, &out.Scopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessToken.
+func (in *UserOAuthAccessToken) DeepCopy() *UserOAuthAccessToken {
+ if in == nil {
+ return nil
+ }
+ out := new(UserOAuthAccessToken)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserOAuthAccessToken) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserOAuthAccessTokenList) DeepCopyInto(out *UserOAuthAccessTokenList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]UserOAuthAccessToken, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessTokenList.
+func (in *UserOAuthAccessTokenList) DeepCopy() *UserOAuthAccessTokenList {
+ if in == nil {
+ return nil
+ }
+ out := new(UserOAuthAccessTokenList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserOAuthAccessTokenList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..f62b715c01
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,171 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ClusterRoleScopeRestriction = map[string]string{
+ "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes",
+ "roleNames": "RoleNames is the list of cluster roles that can referenced. * means anything",
+ "namespaces": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)",
+ "allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources",
+}
+
+func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string {
+ return map_ClusterRoleScopeRestriction
+}
+
+var map_OAuthAccessToken = map[string]string{
+ "": "OAuthAccessToken describes an OAuth access token. The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "clientName": "ClientName references the client that created this token.",
+ "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.",
+ "scopes": "Scopes is an array of the requested scopes.",
+ "redirectURI": "RedirectURI is the redirection associated with the token.",
+ "userName": "UserName is the user name associated with this token",
+ "userUID": "UserUID is the unique UID associated with this token",
+ "authorizeToken": "AuthorizeToken contains the token that authorized this token",
+ "refreshToken": "RefreshToken is the value by which this token can be renewed. Can be blank.",
+ "inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.",
+}
+
+func (OAuthAccessToken) SwaggerDoc() map[string]string {
+ return map_OAuthAccessToken
+}
+
+var map_OAuthAccessTokenList = map[string]string{
+ "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of OAuth access tokens",
+}
+
+func (OAuthAccessTokenList) SwaggerDoc() map[string]string {
+ return map_OAuthAccessTokenList
+}
+
+var map_OAuthAuthorizeToken = map[string]string{
+ "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "clientName": "ClientName references the client that created this token.",
+ "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.",
+ "scopes": "Scopes is an array of the requested scopes.",
+ "redirectURI": "RedirectURI is the redirection associated with the token.",
+ "state": "State data from request",
+ "userName": "UserName is the user name associated with this token",
+ "userUID": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.",
+ "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636",
+ "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636",
+}
+
+func (OAuthAuthorizeToken) SwaggerDoc() map[string]string {
+ return map_OAuthAuthorizeToken
+}
+
+var map_OAuthAuthorizeTokenList = map[string]string{
+ "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of OAuth authorization tokens",
+}
+
+func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string {
+ return map_OAuthAuthorizeTokenList
+}
+
+var map_OAuthClient = map[string]string{
+ "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "secret": "Secret is the unique secret associated with a client",
+ "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation",
+ "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects",
+ "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client",
+ "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients",
+ "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.",
+ "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.",
+ "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value",
+}
+
+func (OAuthClient) SwaggerDoc() map[string]string {
+ return map_OAuthClient
+}
+
+var map_OAuthClientAuthorization = map[string]string{
+ "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "clientName": "ClientName references the client that created this authorization",
+ "userName": "UserName is the user name that authorized this client",
+ "userUID": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.",
+ "scopes": "Scopes is an array of the granted scopes.",
+}
+
+func (OAuthClientAuthorization) SwaggerDoc() map[string]string {
+ return map_OAuthClientAuthorization
+}
+
+var map_OAuthClientAuthorizationList = map[string]string{
+ "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of OAuth client authorizations",
+}
+
+func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string {
+ return map_OAuthClientAuthorizationList
+}
+
+var map_OAuthClientList = map[string]string{
+ "": "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of OAuth clients",
+}
+
+func (OAuthClientList) SwaggerDoc() map[string]string {
+ return map_OAuthClientList
+}
+
+var map_OAuthRedirectReference = map[string]string{
+ "": "OAuthRedirectReference is a reference to an OAuth redirect object.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "reference": "The reference to an redirect object in the current namespace.",
+}
+
+func (OAuthRedirectReference) SwaggerDoc() map[string]string {
+ return map_OAuthRedirectReference
+}
+
+var map_RedirectReference = map[string]string{
+ "": "RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.",
+ "group": "The group of the target that is being referred to.",
+ "kind": "The kind of the target that is being referred to. Currently, only 'Route' is allowed.",
+ "name": "The name of the target that is being referred to. e.g. name of the Route.",
+}
+
+func (RedirectReference) SwaggerDoc() map[string]string {
+ return map_RedirectReference
+}
+
+var map_ScopeRestriction = map[string]string{
+ "": "ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.",
+ "literals": "ExactValues means the scope has to match a particular set of strings exactly",
+ "clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.",
+}
+
+func (ScopeRestriction) SwaggerDoc() map[string]string {
+ return map_ScopeRestriction
+}
+
+var map_UserOAuthAccessTokenList = map[string]string{
+ "": "UserOAuthAccessTokenList is a collection of access tokens issued on behalf of the requesting user\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (UserOAuthAccessTokenList) SwaggerDoc() map[string]string {
+ return map_UserOAuthAccessTokenList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/install.go b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go
new file mode 100644
index 0000000000..5c745fd7ff
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go
@@ -0,0 +1,26 @@
+package openshiftcontrolplane
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1"
+)
+
+const (
+ GroupName = "openshiftcontrolplane.config.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(openshiftcontrolplanev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go
new file mode 100644
index 0000000000..4528e3c4a6
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=openshiftcontrolplane.config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go
new file mode 100644
index 0000000000..3d0bb20f22
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ osinv1 "github.com/openshift/api/osin/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "openshiftcontrolplane.config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &OpenShiftAPIServerConfig{},
+ &OpenShiftControllerManagerConfig{},
+ &BuildDefaultsConfig{},
+ &BuildOverridesConfig{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go
new file mode 100644
index 0000000000..f077f98ab0
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go
@@ -0,0 +1,456 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ buildv1 "github.com/openshift/api/build/v1"
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OpenShiftAPIServerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // provides the standard apiserver configuration
+ configv1.GenericAPIServerConfig `json:",inline"`
+
+ // aggregatorConfig contains information about how to verify the aggregator front proxy
+ AggregatorConfig FrontProxyConfig `json:"aggregatorConfig"`
+
+ // imagePolicyConfig feeds the image policy admission plugin
+ ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"`
+
+ // projectConfig feeds an admission plugin
+ ProjectConfig ProjectConfig `json:"projectConfig"`
+
+ // routingConfig holds information about routing and route generation
+ RoutingConfig RoutingConfig `json:"routingConfig"`
+
+ // serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client.
+ // It must be either: deny, prompt, or ""
+ ServiceAccountOAuthGrantMethod GrantHandlerType `json:"serviceAccountOAuthGrantMethod"`
+
+ // jenkinsPipelineConfig holds information about the default Jenkins template
+ // used for JenkinsPipeline build strategy.
+ // TODO this needs to become a normal plugin config
+ JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"`
+
+ // cloudProviderFile points to the cloud config file
+ // TODO this needs to become a normal plugin config
+ CloudProviderFile string `json:"cloudProviderFile"`
+
+ // TODO this needs to be removed.
+ APIServerArguments map[string][]string `json:"apiServerArguments"`
+
+ // apiServers holds information about enabled/disabled API servers
+ APIServers APIServers `json:"apiServers"`
+}
+
+type APIServers struct {
+ // perGroupOptions is a list of enabled/disabled API servers in addition to the defaults
+ PerGroupOptions []PerGroupOptions `json:"perGroupOptions"`
+}
+
+type PerGroupOptions struct {
+ // name is an API server name (see OpenShiftAPIserverName
+ // typed constants for a complete list of available API servers).
+ Name OpenShiftAPIserverName `json:"name"`
+
+ // enabledVersions is a list of versions that must be enabled in addition to the defaults.
+ // Must not collide with the list of disabled versions
+ EnabledVersions []string `json:"enabledVersions"`
+
+ // disabledVersions is a list of versions that must be disabled in addition to the defaults.
+ // Must not collide with the list of enabled versions
+ DisabledVersions []string `json:"disabledVersions"`
+}
+
+type OpenShiftAPIserverName string
+
+const (
+ OpenShiftAppsAPIserver OpenShiftAPIserverName = "apps.openshift.io"
+ OpenShiftAuthorizationAPIserver OpenShiftAPIserverName = "authorization.openshift.io"
+ OpenShiftBuildAPIserver OpenShiftAPIserverName = "build.openshift.io"
+ OpenShiftImageAPIserver OpenShiftAPIserverName = "image.openshift.io"
+ OpenShiftProjectAPIserver OpenShiftAPIserverName = "project.openshift.io"
+ OpenShiftQuotaAPIserver OpenShiftAPIserverName = "quota.openshift.io"
+ OpenShiftRouteAPIserver OpenShiftAPIserverName = "route.openshift.io"
+ OpenShiftSecurityAPIserver OpenShiftAPIserverName = "security.openshift.io"
+ OpenShiftTemplateAPIserver OpenShiftAPIserverName = "template.openshift.io"
+)
+
+type FrontProxyConfig struct {
+ // clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert
+ ClientCA string `json:"clientCA"`
+ // allowedNames is an optional list of common names to require a match from.
+ AllowedNames []string `json:"allowedNames"`
+
+ // usernameHeaders is the set of headers to check for the username
+ UsernameHeaders []string `json:"usernameHeaders"`
+ // groupHeaders is the set of headers to check for groups
+ GroupHeaders []string `json:"groupHeaders"`
+ // extraHeaderPrefixes is the set of header prefixes to check for user extra
+ ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"`
+}
+
+type GrantHandlerType string
+
+const (
+ // GrantHandlerAuto auto-approves client authorization grant requests
+ GrantHandlerAuto GrantHandlerType = "auto"
+ // GrantHandlerPrompt prompts the user to approve new client authorization grant requests
+ GrantHandlerPrompt GrantHandlerType = "prompt"
+ // GrantHandlerDeny auto-denies client authorization grant requests
+ GrantHandlerDeny GrantHandlerType = "deny"
+)
+
+// RoutingConfig holds the necessary configuration options for routing to subdomains
+type RoutingConfig struct {
+ // subdomain is the suffix appended to $service.$namespace. to form the default route hostname
+ // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the
+ // "default" route.
+ Subdomain string `json:"subdomain"`
+}
+
+type ImagePolicyConfig struct {
+ // maxImagesBulkImportedPerRepository controls the number of images that are imported when a user
+ // does a bulk import of a container repository. This number is set low to prevent users from
+ // importing large numbers of images accidentally. Set -1 for no limit.
+ MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"`
+ // allowedRegistriesForImport limits the container image registries that normal users may import
+ // images from. Set this list to the registries that you trust to contain valid Docker
+ // images and that you want applications to be able to import from. Users with
+ // permission to create Images or ImageStreamMappings via the API are not affected by
+ // this policy - typically only administrators or system integrations will have those
+ // permissions.
+ AllowedRegistriesForImport AllowedRegistries `json:"allowedRegistriesForImport"`
+
+ // internalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format.
+ InternalRegistryHostname string `json:"internalRegistryHostname"`
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames"`
+
+ // additionalTrustedCA is a path to a pem bundle file containing additional CAs that
+ // should be trusted during imagestream import.
+ AdditionalTrustedCA string `json:"additionalTrustedCA"`
+}
+
+// AllowedRegistries represents a list of registries allowed for the image import.
+type AllowedRegistries []RegistryLocation
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // DomainName specifies a domain name for the registry
+ // In case the registry use non-standard (80 or 443) port, the port should be included
+ // in the domain name as well.
+ DomainName string `json:"domainName"`
+ // Insecure indicates whether the registry is secure (https) or insecure (http)
+ // By default (if not specified) the registry is assumed as secure.
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+type ProjectConfig struct {
+ // defaultNodeSelector holds default project node label selector
+ DefaultNodeSelector string `json:"defaultNodeSelector"`
+
+ // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint
+ ProjectRequestMessage string `json:"projectRequestMessage"`
+
+ // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
+ // It is in the format namespace/template and it is optional.
+ // If it is not specified, a default template is used.
+ ProjectRequestTemplate string `json:"projectRequestTemplate"`
+}
+
+// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy
+type JenkinsPipelineConfig struct {
+ // autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided
+ // template when the first build config in the project with type JenkinsPipeline
+ // is created. When not specified this option defaults to true.
+ AutoProvisionEnabled *bool `json:"autoProvisionEnabled"`
+ // templateNamespace contains the namespace name where the Jenkins template is stored
+ TemplateNamespace string `json:"templateNamespace"`
+ // templateName is the name of the default Jenkins template
+ TemplateName string `json:"templateName"`
+ // serviceName is the name of the Jenkins service OpenShift uses to detect
+ // whether a Jenkins pipeline handler has already been installed in a project.
+ // This value *must* match a service name in the provided template.
+ ServiceName string `json:"serviceName"`
+ // parameters specifies a set of optional parameters to the Jenkins template.
+ Parameters map[string]string `json:"parameters"`
+}
+
+// OpenShiftControllerName defines a string type used to represent the various
+// OpenShift controllers within openshift-controller-manager. These constants serve as identifiers
+// for the controllers and are used on both openshift/openshift-controller-manager
+// and openshift/cluster-openshift-controller-manager-operator repositories.
+type OpenShiftControllerName string
+
+const (
+ OpenShiftServiceAccountController OpenShiftControllerName = "openshift.io/serviceaccount"
+ OpenShiftDefaultRoleBindingsController OpenShiftControllerName = "openshift.io/default-rolebindings"
+ OpenShiftServiceAccountPullSecretsController OpenShiftControllerName = "openshift.io/serviceaccount-pull-secrets"
+ OpenShiftOriginNamespaceController OpenShiftControllerName = "openshift.io/origin-namespace"
+ OpenShiftBuildController OpenShiftControllerName = "openshift.io/build"
+ OpenShiftBuildConfigChangeController OpenShiftControllerName = "openshift.io/build-config-change"
+ OpenShiftBuilderServiceAccountController OpenShiftControllerName = "openshift.io/builder-serviceaccount"
+ OpenShiftBuilderRoleBindingsController OpenShiftControllerName = "openshift.io/builder-rolebindings"
+ OpenShiftDeployerController OpenShiftControllerName = "openshift.io/deployer"
+ OpenShiftDeployerServiceAccountController OpenShiftControllerName = "openshift.io/deployer-serviceaccount"
+ OpenShiftDeployerRoleBindingsController OpenShiftControllerName = "openshift.io/deployer-rolebindings"
+ OpenShiftDeploymentConfigController OpenShiftControllerName = "openshift.io/deploymentconfig"
+ OpenShiftImagePullerRoleBindingsController OpenShiftControllerName = "openshift.io/image-puller-rolebindings"
+ OpenShiftImageTriggerController OpenShiftControllerName = "openshift.io/image-trigger"
+ OpenShiftImageImportController OpenShiftControllerName = "openshift.io/image-import"
+ OpenShiftImageSignatureImportController OpenShiftControllerName = "openshift.io/image-signature-import"
+ OpenShiftTemplateInstanceController OpenShiftControllerName = "openshift.io/templateinstance"
+ OpenShiftTemplateInstanceFinalizerController OpenShiftControllerName = "openshift.io/templateinstancefinalizer"
+ OpenShiftUnidlingController OpenShiftControllerName = "openshift.io/unidling"
+ OpenShiftIngressIPController OpenShiftControllerName = "openshift.io/ingress-ip"
+ OpenShiftIngressToRouteController OpenShiftControllerName = "openshift.io/ingress-to-route"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OpenShiftControllerManagerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"`
+
+ // servingInfo describes how to start serving
+ ServingInfo *configv1.HTTPServingInfo `json:"servingInfo"`
+
+ // leaderElection defines the configuration for electing a controller instance to make changes to
+ // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the
+ // legacy direct etcd election code will be used.
+ LeaderElection configv1.LeaderElection `json:"leaderElection"`
+
+ // controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
+ // named 'foo', '-foo' disables the controller named 'foo'.
+ // Defaults to "*".
+ Controllers []string `json:"controllers"`
+
+ ResourceQuota ResourceQuotaControllerConfig `json:"resourceQuota"`
+ ServiceServingCert ServiceServingCert `json:"serviceServingCert"`
+ Deployer DeployerControllerConfig `json:"deployer"`
+ Build BuildControllerConfig `json:"build"`
+ ServiceAccount ServiceAccountControllerConfig `json:"serviceAccount"`
+ DockerPullSecret DockerPullSecretControllerConfig `json:"dockerPullSecret"`
+ Network NetworkControllerConfig `json:"network"`
+ Ingress IngressControllerConfig `json:"ingress"`
+ ImageImport ImageImportControllerConfig `json:"imageImport"`
+ SecurityAllocator SecurityAllocator `json:"securityAllocator"`
+
+ // featureGates are the set of extra OpenShift feature gates for openshift-controller-manager.
+ // These feature gates can be used to enable features that are tech preview or otherwise not available on
+ // OpenShift by default.
+ FeatureGates []string `json:"featureGates"`
+}
+
+type DeployerControllerConfig struct {
+ ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"`
+}
+
+type BuildControllerConfig struct {
+ ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"`
+
+ BuildDefaults *BuildDefaultsConfig `json:"buildDefaults"`
+ BuildOverrides *BuildOverridesConfig `json:"buildOverrides"`
+
+ // additionalTrustedCA is a path to a pem bundle file containing additional CAs that
+ // should be trusted for image pushes and pulls during builds.
+ AdditionalTrustedCA string `json:"additionalTrustedCA"`
+}
+
+type ResourceQuotaControllerConfig struct {
+ ConcurrentSyncs int32 `json:"concurrentSyncs"`
+ SyncPeriod metav1.Duration `json:"syncPeriod"`
+ MinResyncPeriod metav1.Duration `json:"minResyncPeriod"`
+}
+
+type IngressControllerConfig struct {
+ // ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare
+ // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from.
+ // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips,
+ // nodes, pods, or services.
+ IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"`
+}
+
+// MasterNetworkConfig to be passed to the compiled in network plugin
+type NetworkControllerConfig struct {
+ NetworkPluginName string `json:"networkPluginName"`
+ // clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space.
+ ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"`
+ ServiceNetworkCIDR string `json:"serviceNetworkCIDR"`
+ VXLANPort uint32 `json:"vxlanPort"`
+}
+
+type ServiceAccountControllerConfig struct {
+ // managedNames is a list of service account names that will be auto-created in every namespace.
+ // If no names are specified, the ServiceAccountsController will not be started.
+ ManagedNames []string `json:"managedNames"`
+}
+
+type DockerPullSecretControllerConfig struct {
+ // registryURLs is a list of urls that the docker pull secrets should be valid for.
+ RegistryURLs []string `json:"registryURLs"`
+
+ // internalRegistryHostname is the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format. Docker pull secrets
+ // will be generated for this registry.
+ InternalRegistryHostname string `json:"internalRegistryHostname"`
+}
+
+type ImageImportControllerConfig struct {
+ // maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute.
+ // The default value is 60. Set to -1 for unlimited.
+ MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"`
+ // disableScheduledImport allows scheduled background import of images to be disabled.
+ DisableScheduledImport bool `json:"disableScheduledImport"`
+ // scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams
+ // scheduled for background import are checked against the upstream repository. The default value is 15 minutes.
+ ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildDefaultsConfig controls the default information for Builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildDefaultsConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // gitHTTPProxy is the location of the HTTPProxy for Git source
+ GitHTTPProxy string `json:"gitHTTPProxy,omitempty"`
+
+ // gitHTTPSProxy is the location of the HTTPSProxy for Git source
+ GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"`
+
+ // gitNoProxy is the list of domains for which the proxy should not be used
+ GitNoProxy string `json:"gitNoProxy,omitempty"`
+
+ // env is a set of default environment variables that will be applied to the
+ // build if the specified variables do not exist on the build
+ Env []corev1.EnvVar `json:"env,omitempty"`
+
+ // sourceStrategyDefaults are default values that apply to builds using the
+ // source strategy.
+ SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"`
+
+ // imageLabels is a list of labels that are applied to the resulting image.
+ // User can override a default label by providing a label with the same name in their
+ // Build/BuildConfig.
+ ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // annotations are annotations that will be added to the build pod
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // resources defines resource requirements to execute the build.
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// SourceStrategyDefaultsConfig contains values that apply to builds using the
+// source strategy.
+type SourceStrategyDefaultsConfig struct {
+ // incremental indicates if s2i build strategies should perform an incremental
+ // build or not
+ Incremental *bool `json:"incremental,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildOverridesConfig controls override settings for builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildOverridesConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // forcePull overrides, if set, the equivalent value in the builds,
+ // i.e. false disables force pull for all builds,
+ // true enables force pull for all builds,
+ // independently of what each build specifies itself
+ // +optional
+ ForcePull *bool `json:"forcePull,omitempty"`
+
+ // imageLabels is a list of labels that are applied to the resulting image.
+ // If user provided a label in their Build/BuildConfig with the same name as one in this
+ // list, the user's label will be overwritten.
+ ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+ // nodeSelector is a selector which must be true for the build pod to fit on a node
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // annotations are annotations that will be added to the build pod
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // tolerations is a list of Tolerations that will override any existing
+ // tolerations set on a build pod.
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// ImageConfig holds the necessary configuration options for building image names for system components
+type ImageConfig struct {
+ // Format is the format of the name to be built for the system component
+ Format string `json:"format"`
+ // Latest determines if the latest tag will be pulled from the registry
+ Latest bool `json:"latest"`
+}
+
+// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+// pods fulfilling a service to serve with.
+type ServiceServingCert struct {
+ // Signer holds the signing information used to automatically sign serving certificates.
+ // If this value is nil, then certs are not signed automatically.
+ Signer *configv1.CertInfo `json:"signer"`
+}
+
+// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
+type ClusterNetworkEntry struct {
+ // CIDR defines the total range of a cluster networks address space.
+ CIDR string `json:"cidr"`
+ // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.
+ HostSubnetLength uint32 `json:"hostSubnetLength"`
+}
+
+// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.
+type SecurityAllocator struct {
+ // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the
+ // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks
+ // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the
+ // ranges container images will use once user namespaces are started).
+ UIDAllocatorRange string `json:"uidAllocatorRange"`
+ // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is
+ // "/[,]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels
+ // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated
+ // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default
+ // will allow the server to set them automatically.
+ //
+ // Examples:
+ // * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511
+ // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511
+ //
+ MCSAllocatorRange string `json:"mcsAllocatorRange"`
+ // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS
+ // ranges (100k namespaces, 535k/5 labels).
+ MCSLabelsPerProject int `json:"mcsLabelsPerProject"`
+}
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..62de55ed49
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go
@@ -0,0 +1,679 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ buildv1 "github.com/openshift/api/build/v1"
+ configv1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServers) DeepCopyInto(out *APIServers) {
+ *out = *in
+ if in.PerGroupOptions != nil {
+ in, out := &in.PerGroupOptions, &out.PerGroupOptions
+ *out = make([]PerGroupOptions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServers.
+func (in *APIServers) DeepCopy() *APIServers {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServers)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) {
+ {
+ in := &in
+ *out = make(AllowedRegistries, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries.
+func (in AllowedRegistries) DeepCopy() AllowedRegistries {
+ if in == nil {
+ return nil
+ }
+ out := new(AllowedRegistries)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildControllerConfig) DeepCopyInto(out *BuildControllerConfig) {
+ *out = *in
+ out.ImageTemplateFormat = in.ImageTemplateFormat
+ if in.BuildDefaults != nil {
+ in, out := &in.BuildDefaults, &out.BuildDefaults
+ *out = new(BuildDefaultsConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.BuildOverrides != nil {
+ in, out := &in.BuildOverrides, &out.BuildOverrides
+ *out = new(BuildOverridesConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildControllerConfig.
+func (in *BuildControllerConfig) DeepCopy() *BuildControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]corev1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SourceStrategyDefaults != nil {
+ in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults
+ *out = new(SourceStrategyDefaultsConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]buildv1.ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig.
+func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildDefaultsConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ForcePull != nil {
+ in, out := &in.ForcePull, &out.ForcePull
+ *out = new(bool)
+ **out = **in
+ }
+ if in.ImageLabels != nil {
+ in, out := &in.ImageLabels, &out.ImageLabels
+ *out = make([]buildv1.ImageLabel, len(*in))
+ copy(*out, *in)
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig.
+func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BuildOverridesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeployerControllerConfig) DeepCopyInto(out *DeployerControllerConfig) {
+ *out = *in
+ out.ImageTemplateFormat = in.ImageTemplateFormat
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployerControllerConfig.
+func (in *DeployerControllerConfig) DeepCopy() *DeployerControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DeployerControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerPullSecretControllerConfig) DeepCopyInto(out *DockerPullSecretControllerConfig) {
+ *out = *in
+ if in.RegistryURLs != nil {
+ in, out := &in.RegistryURLs, &out.RegistryURLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerPullSecretControllerConfig.
+func (in *DockerPullSecretControllerConfig) DeepCopy() *DockerPullSecretControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DockerPullSecretControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FrontProxyConfig) DeepCopyInto(out *FrontProxyConfig) {
+ *out = *in
+ if in.AllowedNames != nil {
+ in, out := &in.AllowedNames, &out.AllowedNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.UsernameHeaders != nil {
+ in, out := &in.UsernameHeaders, &out.UsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupHeaders != nil {
+ in, out := &in.GroupHeaders, &out.GroupHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraHeaderPrefixes != nil {
+ in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxyConfig.
+func (in *FrontProxyConfig) DeepCopy() *FrontProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(FrontProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageConfig) DeepCopyInto(out *ImageConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig.
+func (in *ImageConfig) DeepCopy() *ImageConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageImportControllerConfig) DeepCopyInto(out *ImageImportControllerConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportControllerConfig.
+func (in *ImageImportControllerConfig) DeepCopy() *ImageImportControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageImportControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) {
+ *out = *in
+ if in.AllowedRegistriesForImport != nil {
+ in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport
+ *out = make(AllowedRegistries, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExternalRegistryHostnames != nil {
+ in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig.
+func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ImagePolicyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerConfig) DeepCopyInto(out *IngressControllerConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerConfig.
+func (in *IngressControllerConfig) DeepCopy() *IngressControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) {
+ *out = *in
+ if in.AutoProvisionEnabled != nil {
+ in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig.
+func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(JenkinsPipelineConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkControllerConfig) DeepCopyInto(out *NetworkControllerConfig) {
+ *out = *in
+ if in.ClusterNetworks != nil {
+ in, out := &in.ClusterNetworks, &out.ClusterNetworks
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkControllerConfig.
+func (in *NetworkControllerConfig) DeepCopy() *NetworkControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerConfig) DeepCopyInto(out *OpenShiftAPIServerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig)
+ in.AggregatorConfig.DeepCopyInto(&out.AggregatorConfig)
+ in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig)
+ out.ProjectConfig = in.ProjectConfig
+ out.RoutingConfig = in.RoutingConfig
+ in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig)
+ if in.APIServerArguments != nil {
+ in, out := &in.APIServerArguments, &out.APIServerArguments
+ *out = make(map[string][]string, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ in.APIServers.DeepCopyInto(&out.APIServers)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerConfig.
+func (in *OpenShiftAPIServerConfig) DeepCopy() *OpenShiftAPIServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftAPIServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftAPIServerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerConfig) DeepCopyInto(out *OpenShiftControllerManagerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.KubeClientConfig = in.KubeClientConfig
+ if in.ServingInfo != nil {
+ in, out := &in.ServingInfo, &out.ServingInfo
+ *out = new(configv1.HTTPServingInfo)
+ (*in).DeepCopyInto(*out)
+ }
+ out.LeaderElection = in.LeaderElection
+ if in.Controllers != nil {
+ in, out := &in.Controllers, &out.Controllers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ out.ResourceQuota = in.ResourceQuota
+ in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert)
+ out.Deployer = in.Deployer
+ in.Build.DeepCopyInto(&out.Build)
+ in.ServiceAccount.DeepCopyInto(&out.ServiceAccount)
+ in.DockerPullSecret.DeepCopyInto(&out.DockerPullSecret)
+ in.Network.DeepCopyInto(&out.Network)
+ out.Ingress = in.Ingress
+ out.ImageImport = in.ImageImport
+ out.SecurityAllocator = in.SecurityAllocator
+ if in.FeatureGates != nil {
+ in, out := &in.FeatureGates, &out.FeatureGates
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerConfig.
+func (in *OpenShiftControllerManagerConfig) DeepCopy() *OpenShiftControllerManagerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftControllerManagerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftControllerManagerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerGroupOptions) DeepCopyInto(out *PerGroupOptions) {
+ *out = *in
+ if in.EnabledVersions != nil {
+ in, out := &in.EnabledVersions, &out.EnabledVersions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.DisabledVersions != nil {
+ in, out := &in.DisabledVersions, &out.DisabledVersions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerGroupOptions.
+func (in *PerGroupOptions) DeepCopy() *PerGroupOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(PerGroupOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig.
+func (in *ProjectConfig) DeepCopy() *ProjectConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation.
+func (in *RegistryLocation) DeepCopy() *RegistryLocation {
+ if in == nil {
+ return nil
+ }
+ out := new(RegistryLocation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaControllerConfig) DeepCopyInto(out *ResourceQuotaControllerConfig) {
+ *out = *in
+ out.SyncPeriod = in.SyncPeriod
+ out.MinResyncPeriod = in.MinResyncPeriod
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaControllerConfig.
+func (in *ResourceQuotaControllerConfig) DeepCopy() *ResourceQuotaControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceQuotaControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig.
+func (in *RoutingConfig) DeepCopy() *RoutingConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutingConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator.
+func (in *SecurityAllocator) DeepCopy() *SecurityAllocator {
+ if in == nil {
+ return nil
+ }
+ out := new(SecurityAllocator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountControllerConfig) DeepCopyInto(out *ServiceAccountControllerConfig) {
+ *out = *in
+ if in.ManagedNames != nil {
+ in, out := &in.ManagedNames, &out.ManagedNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountControllerConfig.
+func (in *ServiceAccountControllerConfig) DeepCopy() *ServiceAccountControllerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountControllerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) {
+ *out = *in
+ if in.Signer != nil {
+ in, out := &in.Signer, &out.Signer
+ *out = new(configv1.CertInfo)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert.
+func (in *ServiceServingCert) DeepCopy() *ServiceServingCert {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceServingCert)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) {
+ *out = *in
+ if in.Incremental != nil {
+ in, out := &in.Incremental, &out.Incremental
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig.
+func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SourceStrategyDefaultsConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..25a9333bbf
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,257 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_APIServers = map[string]string{
+ "perGroupOptions": "perGroupOptions is a list of enabled/disabled API servers in addition to the defaults",
+}
+
+func (APIServers) SwaggerDoc() map[string]string {
+ return map_APIServers
+}
+
+var map_BuildControllerConfig = map[string]string{
+ "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted for image pushes and pulls during builds.",
+}
+
+func (BuildControllerConfig) SwaggerDoc() map[string]string {
+ return map_BuildControllerConfig
+}
+
+var map_BuildDefaultsConfig = map[string]string{
+ "": "BuildDefaultsConfig controls the default information for Builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source",
+ "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source",
+ "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used",
+ "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build",
+ "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.",
+ "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+ "annotations": "annotations are annotations that will be added to the build pod",
+ "resources": "resources defines resource requirements to execute the build.",
+}
+
+func (BuildDefaultsConfig) SwaggerDoc() map[string]string {
+ return map_BuildDefaultsConfig
+}
+
+var map_BuildOverridesConfig = map[string]string{
+ "": "BuildOverridesConfig controls override settings for builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself",
+ "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.",
+ "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node",
+ "annotations": "annotations are annotations that will be added to the build pod",
+ "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.",
+}
+
+func (BuildOverridesConfig) SwaggerDoc() map[string]string {
+ return map_BuildOverridesConfig
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.",
+ "cidr": "CIDR defines the total range of a cluster networks address space.",
+ "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_DockerPullSecretControllerConfig = map[string]string{
+ "registryURLs": "registryURLs is a list of urls that the docker pull secrets should be valid for.",
+ "internalRegistryHostname": "internalRegistryHostname is the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. Docker pull secrets will be generated for this registry.",
+}
+
+func (DockerPullSecretControllerConfig) SwaggerDoc() map[string]string {
+ return map_DockerPullSecretControllerConfig
+}
+
+var map_FrontProxyConfig = map[string]string{
+ "clientCA": "clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert",
+ "allowedNames": "allowedNames is an optional list of common names to require a match from.",
+ "usernameHeaders": "usernameHeaders is the set of headers to check for the username",
+ "groupHeaders": "groupHeaders is the set of headers to check for groups",
+ "extraHeaderPrefixes": "extraHeaderPrefixes is the set of header prefixes to check for user extra",
+}
+
+func (FrontProxyConfig) SwaggerDoc() map[string]string {
+ return map_FrontProxyConfig
+}
+
+var map_ImageConfig = map[string]string{
+ "": "ImageConfig holds the necessary configuration options for building image names for system components",
+ "format": "Format is the format of the name to be built for the system component",
+ "latest": "Latest determines if the latest tag will be pulled from the registry",
+}
+
+func (ImageConfig) SwaggerDoc() map[string]string {
+ return map_ImageConfig
+}
+
+var map_ImageImportControllerConfig = map[string]string{
+ "maxScheduledImageImportsPerMinute": "maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.",
+ "disableScheduledImport": "disableScheduledImport allows scheduled background import of images to be disabled.",
+ "scheduledImageImportMinimumIntervalSeconds": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.",
+}
+
+func (ImageImportControllerConfig) SwaggerDoc() map[string]string {
+ return map_ImageImportControllerConfig
+}
+
+var map_ImagePolicyConfig = map[string]string{
+ "maxImagesBulkImportedPerRepository": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number is set low to prevent users from importing large numbers of images accidentally. Set -1 for no limit.",
+ "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.",
+ "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.",
+ "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+ "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.",
+}
+
+func (ImagePolicyConfig) SwaggerDoc() map[string]string {
+ return map_ImagePolicyConfig
+}
+
+var map_IngressControllerConfig = map[string]string{
+ "ingressIPNetworkCIDR": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.",
+}
+
+func (IngressControllerConfig) SwaggerDoc() map[string]string {
+ return map_IngressControllerConfig
+}
+
+var map_JenkinsPipelineConfig = map[string]string{
+ "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy",
+ "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.",
+ "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored",
+ "templateName": "templateName is the name of the default Jenkins template",
+ "serviceName": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.",
+ "parameters": "parameters specifies a set of optional parameters to the Jenkins template.",
+}
+
+func (JenkinsPipelineConfig) SwaggerDoc() map[string]string {
+ return map_JenkinsPipelineConfig
+}
+
+var map_NetworkControllerConfig = map[string]string{
+ "": "MasterNetworkConfig to be passed to the compiled in network plugin",
+ "clusterNetworks": "clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space.",
+}
+
+func (NetworkControllerConfig) SwaggerDoc() map[string]string {
+ return map_NetworkControllerConfig
+}
+
+var map_OpenShiftAPIServerConfig = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "aggregatorConfig": "aggregatorConfig contains information about how to verify the aggregator front proxy",
+ "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin",
+ "projectConfig": "projectConfig feeds an admission plugin",
+ "routingConfig": "routingConfig holds information about routing and route generation",
+ "serviceAccountOAuthGrantMethod": "serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt, or \"\"",
+ "jenkinsPipelineConfig": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.",
+ "cloudProviderFile": "cloudProviderFile points to the cloud config file",
+ "apiServers": "apiServers holds information about enabled/disabled API servers",
+}
+
+func (OpenShiftAPIServerConfig) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServerConfig
+}
+
+var map_OpenShiftControllerManagerConfig = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "servingInfo": "servingInfo describes how to start serving",
+ "leaderElection": "leaderElection defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.",
+ "controllers": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".",
+ "featureGates": "featureGates are the set of extra OpenShift feature gates for openshift-controller-manager. These feature gates can be used to enable features that are tech preview or otherwise not available on OpenShift by default.",
+}
+
+func (OpenShiftControllerManagerConfig) SwaggerDoc() map[string]string {
+ return map_OpenShiftControllerManagerConfig
+}
+
+var map_PerGroupOptions = map[string]string{
+ "name": "name is an API server name (see OpenShiftAPIserverName typed constants for a complete list of available API servers).",
+ "enabledVersions": "enabledVersions is a list of versions that must be enabled in addition to the defaults. Must not collide with the list of disabled versions",
+ "disabledVersions": "disabledVersions is a list of versions that must be disabled in addition to the defaults. Must not collide with the list of enabled versions",
+}
+
+func (PerGroupOptions) SwaggerDoc() map[string]string {
+ return map_PerGroupOptions
+}
+
+var map_ProjectConfig = map[string]string{
+ "defaultNodeSelector": "defaultNodeSelector holds default project node label selector",
+ "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint",
+ "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.",
+}
+
+func (ProjectConfig) SwaggerDoc() map[string]string {
+ return map_ProjectConfig
+}
+
+var map_RegistryLocation = map[string]string{
+ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.",
+ "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.",
+ "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.",
+}
+
+func (RegistryLocation) SwaggerDoc() map[string]string {
+ return map_RegistryLocation
+}
+
+var map_RoutingConfig = map[string]string{
+ "": "RoutingConfig holds the necessary configuration options for routing to subdomains",
+ "subdomain": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.",
+}
+
+func (RoutingConfig) SwaggerDoc() map[string]string {
+ return map_RoutingConfig
+}
+
+var map_SecurityAllocator = map[string]string{
+ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.",
+ "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).",
+ "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511",
+ "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).",
+}
+
+func (SecurityAllocator) SwaggerDoc() map[string]string {
+ return map_SecurityAllocator
+}
+
+var map_ServiceAccountControllerConfig = map[string]string{
+ "managedNames": "managedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.",
+}
+
+func (ServiceAccountControllerConfig) SwaggerDoc() map[string]string {
+ return map_ServiceAccountControllerConfig
+}
+
+var map_ServiceServingCert = map[string]string{
+ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+ "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.",
+}
+
+func (ServiceServingCert) SwaggerDoc() map[string]string {
+ return map_ServiceServingCert
+}
+
+var map_SourceStrategyDefaultsConfig = map[string]string{
+ "": "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.",
+ "incremental": "incremental indicates if s2i build strategies should perform an incremental build or not",
+}
+
+func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string {
+ return map_SourceStrategyDefaultsConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/operator/.codegen.yaml b/vendor/github.com/openshift/api/operator/.codegen.yaml
new file mode 100644
index 0000000000..1f30181f13
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/.codegen.yaml
@@ -0,0 +1,6 @@
+schemapatch:
+swaggerdocs:
+ commentPolicy: Warn
+
+
+
diff --git a/vendor/github.com/openshift/api/operator/install.go b/vendor/github.com/openshift/api/operator/install.go
new file mode 100644
index 0000000000..9cbf25a4bb
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/install.go
@@ -0,0 +1,27 @@
+package operator
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+ operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
+)
+
+const (
+ GroupName = "operator.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(operatorv1alpha1.Install, operatorv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/Makefile b/vendor/github.com/openshift/api/operator/v1/Makefile
new file mode 100644
index 0000000000..77f5d34091
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="operator.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go
new file mode 100644
index 0000000000..3de961a7fc
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=operator.openshift.io
+package v1
diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go
new file mode 100644
index 0000000000..21919f9a8b
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/register.go
@@ -0,0 +1,80 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "operator.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &Authentication{},
+ &AuthenticationList{},
+ &DNS{},
+ &DNSList{},
+ &CloudCredential{},
+ &CloudCredentialList{},
+ &ClusterCSIDriver{},
+ &ClusterCSIDriverList{},
+ &Console{},
+ &ConsoleList{},
+ &CSISnapshotController{},
+ &CSISnapshotControllerList{},
+ &Etcd{},
+ &EtcdList{},
+ &KubeAPIServer{},
+ &KubeAPIServerList{},
+ &KubeControllerManager{},
+ &KubeControllerManagerList{},
+ &KubeScheduler{},
+ &KubeSchedulerList{},
+ &KubeStorageVersionMigrator{},
+ &KubeStorageVersionMigratorList{},
+ &MachineConfiguration{},
+ &MachineConfigurationList{},
+ &Network{},
+ &NetworkList{},
+ &OpenShiftAPIServer{},
+ &OpenShiftAPIServerList{},
+ &OpenShiftControllerManager{},
+ &OpenShiftControllerManagerList{},
+ &ServiceCA{},
+ &ServiceCAList{},
+ &ServiceCatalogAPIServer{},
+ &ServiceCatalogAPIServerList{},
+ &ServiceCatalogControllerManager{},
+ &ServiceCatalogControllerManagerList{},
+ &IngressController{},
+ &IngressControllerList{},
+ &InsightsOperator{},
+ &InsightsOperatorList{},
+ &Storage{},
+ &StorageList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go
new file mode 100644
index 0000000000..19bc5a359b
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types.go
@@ -0,0 +1,245 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// MyOperatorResource is an example operator configuration type
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type MyOperatorResource struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec MyOperatorResourceSpec `json:"spec"`
+ Status MyOperatorResourceStatus `json:"status"`
+}
+
+type MyOperatorResourceSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type MyOperatorResourceStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$`
+type ManagementState string
+
+var (
+ // Force means that the operator is actively managing its resources but will not block an upgrade
+ // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades
+ Force ManagementState = "Force"
+ // Managed means that the operator is actively managing its resources and trying to keep the component active.
+ // It will only upgrade the component if it is safe to do so
+ Managed ManagementState = "Managed"
+ // Unmanaged means that the operator will not take any action related to the component
+ // Some operators might not support this management state as it might damage the cluster and lead to manual recovery.
+ Unmanaged ManagementState = "Unmanaged"
+ // Removed means that the operator is actively managing its resources and trying to remove all traces of the component
+ // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will
+ // brick the cluster.
+ Removed ManagementState = "Removed"
+)
+
+// OperatorSpec contains common fields operators need. It is intended to be anonymous included
+// inside of the Spec struct for your particular operator.
+type OperatorSpec struct {
+ // managementState indicates whether and how the operator should manage the component
+ ManagementState ManagementState `json:"managementState"`
+
+ // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a
+ // simple way to manage coarse grained logging choices that operators have to interpret for their operands.
+ //
+ // Valid values are: "Normal", "Debug", "Trace", "TraceAll".
+ // Defaults to "Normal".
+ // +optional
+ // +kubebuilder:default=Normal
+ LogLevel LogLevel `json:"logLevel,omitempty"`
+
+ // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a
+ // simple way to manage coarse grained logging choices that operators have to interpret for themselves.
+ //
+ // Valid values are: "Normal", "Debug", "Trace", "TraceAll".
+ // Defaults to "Normal".
+ // +optional
+ // +kubebuilder:default=Normal
+ OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"`
+
+ // unsupportedConfigOverrides overrides the final configuration that was computed by the operator.
+ // Red Hat does not support the use of this field.
+ // Misuse of this field could lead to unexpected behavior or conflict with other configuration options.
+ // Seek guidance from the Red Hat support before using this field.
+ // Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster.
+ // +optional
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"`
+
+ // observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because
+ // it is an input to the level for the operator
+ // +optional
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ ObservedConfig runtime.RawExtension `json:"observedConfig"`
+}
+
+// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll
+type LogLevel string
+
+var (
+ // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2.
+ Normal LogLevel = "Normal"
+
+ // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4.
+ Debug LogLevel = "Debug"
+
+ // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6.
+ Trace LogLevel = "Trace"
+
+ // TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster
+	// prepare for serious performance issues and massive amounts of logs. In kube, this is probably glog=8.
+ TraceAll LogLevel = "TraceAll"
+)
+
+type OperatorStatus struct {
+ // observedGeneration is the last generation change you've dealt with
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // conditions is a list of conditions and their status
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+ // version is the level this availability applies to
+ // +optional
+ Version string `json:"version,omitempty"`
+
+ // readyReplicas indicates how many replicas are ready and at the desired state
+ ReadyReplicas int32 `json:"readyReplicas"`
+
+ // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.
+ // +listType=atomic
+ // +optional
+ Generations []GenerationStatus `json:"generations,omitempty"`
+}
+
+// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+type GenerationStatus struct {
+ // group is the group of the thing you're tracking
+ Group string `json:"group"`
+ // resource is the resource type of the thing you're tracking
+ Resource string `json:"resource"`
+ // namespace is where the thing you're tracking is
+ Namespace string `json:"namespace"`
+ // name is the name of the thing you're tracking
+ Name string `json:"name"`
+ // lastGeneration is the last generation of the workload controller involved
+ LastGeneration int64 `json:"lastGeneration"`
+ // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
+ Hash string `json:"hash"`
+}
+
+var (
+ // Available indicates that the operand is present and accessible in the cluster
+ OperatorStatusTypeAvailable = "Available"
+ // Progressing indicates that the operator is trying to transition the operand to a different state
+ OperatorStatusTypeProgressing = "Progressing"
+ // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent
+ OperatorStatusTypeDegraded = "Degraded"
+ // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the
+ // current and desired states.
+ OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied"
+ // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO
+ OperatorStatusTypeUpgradeable = "Upgradeable"
+)
+
+// OperatorCondition is just the standard condition fields.
+type OperatorCondition struct {
+ // +kubebuilder:validation:Required
+ Type string `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+type ConditionStatus string
+
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// StaticPodOperatorSpec is spec for controllers that manage static pods.
+type StaticPodOperatorSpec struct {
+ OperatorSpec `json:",inline"`
+
+ // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string.
+ // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work
+ // this time instead of failing again on the same config.
+ ForceRedeploymentReason string `json:"forceRedeploymentReason"`
+
+ // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api
+ // -1 = unlimited, 0 or unset = 5 (default)
+ FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"`
+ // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api
+ // -1 = unlimited, 0 or unset = 5 (default)
+ SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"`
+}
+
+// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual
+// node status must be tracked.
+type StaticPodOperatorStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableRevision is the deploymentID of the most recent deployment
+ // +optional
+ LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+
+ // latestAvailableRevisionReason describe the detailed reason for the most recent deployment
+ // +optional
+ LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
+
+ // nodeStatuses track the deployment values and errors across individual nodes
+ // +listType=map
+ // +listMapKey=nodeName
+ // +optional
+ NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`
+}
+
+// NodeStatus provides information about the current state of a particular node managed by this operator.
+type NodeStatus struct {
+ // nodeName is the name of the node
+ // +kubebuilder:validation:Required
+ NodeName string `json:"nodeName"`
+
+ // currentRevision is the generation of the most recently successful deployment
+ CurrentRevision int32 `json:"currentRevision"`
+ // targetRevision is the generation of the deployment we're trying to apply
+ TargetRevision int32 `json:"targetRevision,omitempty"`
+
+ // lastFailedRevision is the generation of the deployment we tried and failed to deploy.
+ LastFailedRevision int32 `json:"lastFailedRevision,omitempty"`
+ // lastFailedTime is the time the last failed revision failed the last time.
+ LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"`
+ // lastFailedReason is a machine readable failure reason string.
+ LastFailedReason string `json:"lastFailedReason,omitempty"`
+ // lastFailedCount is how often the installer pod of the last failed revision failed.
+ LastFailedCount int `json:"lastFailedCount,omitempty"`
+ // lastFallbackCount is how often a fallback to a previous revision happened.
+ LastFallbackCount int `json:"lastFallbackCount,omitempty"`
+ // lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision.
+ // +listType=atomic
+ LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go
new file mode 100644
index 0000000000..58d8748d97
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go
@@ -0,0 +1,69 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=authentications,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=authentication,operatorOrdering=01
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+
+// Authentication provides information to configure an operator to manage authentication.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Authentication struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec AuthenticationSpec `json:"spec,omitempty"`
+ // +optional
+ Status AuthenticationStatus `json:"status,omitempty"`
+}
+
+type AuthenticationSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type AuthenticationStatus struct {
+ // OAuthAPIServer holds status specific only to oauth-apiserver
+ // +optional
+ OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"`
+
+ OperatorStatus `json:",inline"`
+}
+
+type OAuthAPIServerStatus struct {
+ // LatestAvailableRevision is the latest revision used as suffix of revisioned
+ // secrets like encryption-config. A new revision causes a new deployment of pods.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AuthenticationList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Authentication `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go
new file mode 100644
index 0000000000..9666b27922
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go
@@ -0,0 +1,93 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=cloudcredentials,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/692
+// +openshift:capability=CloudCredential
+// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=cloud-credential,operatorOrdering=00
+
+// CloudCredential provides a means to configure an operator to manage CredentialsRequests.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CloudCredential struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec CloudCredentialSpec `json:"spec"`
+ // +optional
+ Status CloudCredentialStatus `json:"status"`
+}
+
+// CloudCredentialsMode is the specified mode the cloud-credential-operator
+// should reconcile CredentialsRequest with
+// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough
+type CloudCredentialsMode string
+
+const (
+ // CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests
+ // (primarily used for the disconnected VPC use-cases).
+ CloudCredentialsModeManual CloudCredentialsMode = "Manual"
+
+ // CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests
+ // by minting new users/credentials.
+ CloudCredentialsModeMint CloudCredentialsMode = "Mint"
+
+ // CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests
+ // by copying the cloud-specific secret data.
+ CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough"
+
+ // CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults):
+ // AWS/Azure/GCP: dynamically determine cluster's cloud credential capabilities to affect
+ // processing of CredentialsRequests
+ // All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode
+ CloudCredentialsModeDefault CloudCredentialsMode = ""
+)
+
+// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.
+type CloudCredentialSpec struct {
+ OperatorSpec `json:",inline"`
+ // CredentialsMode allows informing CCO that it should not attempt to dynamically
+ // determine the root cloud credentials capabilities, and it should just run in
+ // the specified mode.
+ // It also allows putting the operator into "manual" mode if desired.
+ // Leaving the field in default mode runs CCO so that the cluster's cloud credentials
+ // will be dynamically probed for capabilities (on supported clouds/platforms).
+ // Supported modes:
+ // AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual"
+ // Others: Do not set value as other platforms only support running in "Passthrough"
+ // +optional
+ CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"`
+}
+
+// CloudCredentialStatus defines the observed status of the cloud-credential-operator.
+type CloudCredentialStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CloudCredentialList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []CloudCredential `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go
new file mode 100644
index 0000000000..e7c6d59dbb
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_config.go
@@ -0,0 +1,61 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=configs,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/612
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+
+// Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components
+// on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Config struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Config Operator.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ConfigSpec `json:"spec"`
+
+ // status defines the observed status of the Config Operator.
+ // +optional
+ Status ConfigStatus `json:"status"`
+}
+
+type ConfigSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type ConfigStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []Config `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go
new file mode 100644
index 0000000000..474253d5d7
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_console.go
@@ -0,0 +1,390 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=consoles,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/486
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=console,operatorOrdering=01
+
+// Console provides a means to configure an operator to manage the console.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Console struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ConsoleSpec `json:"spec,omitempty"`
+ // +optional
+ Status ConsoleStatus `json:"status,omitempty"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+ OperatorSpec `json:",inline"`
+ // customization is used to optionally provide a small set of
+ // customization options to the web console.
+ // +optional
+ Customization ConsoleCustomization `json:"customization"`
+ // providers contains configuration for using specific service providers.
+ Providers ConsoleProviders `json:"providers"`
+ // route contains hostname and secret reference that contains the serving certificate.
+ // If a custom route is specified, a new route will be created with the
+ // provided hostname, under which console will be available.
+ // In case of custom hostname uses the default routing suffix of the cluster,
+ // the Secret specification for a serving certificate will not be needed.
+ // In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary.
+ // The default console route will be maintained to reserve the default hostname
+ // for console if the custom route is removed.
+ // If not specified, default route will be used.
+ // DEPRECATED
+ // +optional
+ Route ConsoleConfigRoute `json:"route"`
+ // plugins defines a list of enabled console plugin names.
+ // +optional
+ Plugins []string `json:"plugins,omitempty"`
+}
+
+// ConsoleConfigRoute holds information on external route access to console.
+// DEPRECATED
+type ConsoleConfigRoute struct {
+ // hostname is the desired custom domain under which console will be available.
+ Hostname string `json:"hostname"`
+ // secret points to secret in the openshift-config namespace that contains custom
+ // certificate and key and needs to be created manually by the cluster admin.
+ // Referenced Secret is required to contain following key value pairs:
+ // - "tls.crt" - to specify the custom certificate
+ // - "tls.key" - to specify the private key of the custom certificate
+ // If the custom hostname uses the default routing suffix of the cluster,
+ // the Secret specification for a serving certificate will not be needed.
+ // +optional
+ Secret configv1.SecretNameReference `json:"secret"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
+type ConsoleStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// ConsoleProviders defines a list of optional additional providers of
+// functionality to the console.
+type ConsoleProviders struct {
+ // statuspage contains ID for statuspage.io page that provides status info about.
+ // +optional
+ Statuspage *StatuspageProvider `json:"statuspage,omitempty"`
+}
+
+// StatuspageProvider provides identity for statuspage account.
+type StatuspageProvider struct {
+ // pageID is the unique ID assigned by Statuspage for your page. This must be a public page.
+ PageID string `json:"pageID"`
+}
+
+// ConsoleCustomization defines a list of optional configuration for the console UI.
+type ConsoleCustomization struct {
+ // brand is the default branding of the web console which can be overridden by
+ // providing the brand field. There is a limited set of specific brand options.
+ // This field controls elements of the console such as the logo.
+ // Invalid value will prevent a console rollout.
+ // +kubebuilder:validation:Enum:=openshift;okd;online;ocp;dedicated;azure;OpenShift;OKD;Online;OCP;Dedicated;Azure;ROSA
+ Brand Brand `json:"brand,omitempty"`
+ // documentationBaseURL links to external documentation are shown in various sections
+ // of the web console. Providing documentationBaseURL will override the default
+ // documentation URL.
+ // Invalid value will prevent a console rollout.
+ // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$`
+ DocumentationBaseURL string `json:"documentationBaseURL,omitempty"`
+ // customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog
+ // instead of the normal OpenShift product name.
+ // +optional
+ CustomProductName string `json:"customProductName,omitempty"`
+ // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a
+ // ConfigMap in the openshift-config namespace. This can be created with a command like
+ // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'.
+ // Image size must be less than 1 MB due to constraints on the ConfigMap size.
+ // The ConfigMap key should include a file extension so that the console serves the file
+ // with the correct MIME type.
+ // Recommended logo specifications:
+ // Dimensions: Max height of 68px and max width of 200px
+ // SVG format preferred
+ // +optional
+ CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"`
+ // developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs).
+ // +kubebuilder:validation:Optional
+ // +optional
+ DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"`
+ // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective
+ // Project access page which can be used by a project admin to specify roles to other users and
+ // restrict access within the project. If set, the list will replace the default ClusterRole options.
+ // +kubebuilder:validation:Optional
+ // +optional
+ ProjectAccess ProjectAccess `json:"projectAccess,omitempty"`
+ // quickStarts allows customization of available ConsoleQuickStart resources in console.
+ // +kubebuilder:validation:Optional
+ // +optional
+ QuickStarts QuickStarts `json:"quickStarts,omitempty"`
+ // addPage allows customizing actions on the Add page in developer perspective.
+ // +kubebuilder:validation:Optional
+ // +optional
+ AddPage AddPage `json:"addPage,omitempty"`
+ // perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown.
+ // +listType=map
+ // +listMapKey=id
+ // +optional
+ Perspectives []Perspective `json:"perspectives"`
+}
+
+// ProjectAccess contains options for project access roles
+type ProjectAccess struct {
+ // availableClusterRoles is the list of ClusterRole names that are assignable to users
+ // through the project access tab.
+ // +kubebuilder:validation:Optional
+ // +optional
+ AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"`
+}
+
+// CatalogTypesState defines the state of the catalog types based on which the types will be enabled or disabled.
+type CatalogTypesState string
+
+const (
+ CatalogTypeEnabled CatalogTypesState = "Enabled"
+ CatalogTypeDisabled CatalogTypesState = "Disabled"
+)
+
+// DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.
+// +kubebuilder:validation:XValidation:rule="self.state == 'Enabled' ? true : !has(self.enabled)",message="enabled is forbidden when state is not Enabled"
+// +kubebuilder:validation:XValidation:rule="self.state == 'Disabled' ? true : !has(self.disabled)",message="disabled is forbidden when state is not Disabled"
+// +union
+type DeveloperConsoleCatalogTypes struct {
+ // state defines if a list of catalog types should be enabled or disabled.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:="Enabled";"Disabled";
+ // +kubebuilder:default:="Enabled"
+ // +default="Enabled"
+ // +kubebuilder:validation:Required
+ State CatalogTypesState `json:"state,omitempty"`
+ // enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users.
+ // Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available
+ // in the console on the cluster configuration page, or when editing the YAML in the console.
+ // Example: "Devfile", "HelmChart", "BuilderImage"
+ // If the list is non-empty, a new type will not be shown to the user until it is added to list.
+ // If the list is empty the complete developer catalog will be shown.
+ // +listType=set
+ // +unionMember,optional
+ Enabled *[]string `json:"enabled,omitempty"`
+ // disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users.
+ // Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available
+ // in the console on the cluster configuration page, or when editing the YAML in the console.
+ // Example: "Devfile", "HelmChart", "BuilderImage"
+ // If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.
+ // +listType=set
+ // +unionMember,optional
+ Disabled *[]string `json:"disabled,omitempty"`
+}
+
+// DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.
+type DeveloperConsoleCatalogCustomization struct {
+ // categories which are shown in the developer catalog.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"`
+ // types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog.
+ // When omitted, all the sub-catalog types will be shown.
+ // +optional
+ Types DeveloperConsoleCatalogTypes `json:"types,omitempty"`
+}
+
+// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.
+type DeveloperConsoleCatalogCategoryMeta struct {
+ // ID is an identifier used in the URL to enable deep linking in console.
+ // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=32
+ // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$`
+ // +required
+ ID string `json:"id"`
+ // label defines a category display label. It is required and must have 1-64 characters.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=64
+ // +required
+ Label string `json:"label"`
+ // tags is a list of strings that will match the category. A selected category
+ // shows all items which have at least one overlapping tag between category and item.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Tags []string `json:"tags,omitempty"`
+}
+
+// DeveloperConsoleCatalogCategory for the developer console catalog.
+type DeveloperConsoleCatalogCategory struct {
+ // defines top level category ID, label and filter tags.
+ DeveloperConsoleCatalogCategoryMeta `json:",inline"`
+ // subcategories defines a list of child categories.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"`
+}
+
+// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.
+type QuickStarts struct {
+ // disabled is a list of ConsoleQuickStart resource names that are not shown to users.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Disabled []string `json:"disabled,omitempty"`
+}
+
+// AddPage allows customizing actions on the Add page in developer perspective.
+type AddPage struct {
+ // disabledActions is a list of actions that are not shown to users.
+ // Each action in the list is represented by its ID.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:MinItems=1
+ // +optional
+ DisabledActions []string `json:"disabledActions,omitempty"`
+}
+
+// PerspectiveState defines the visibility state of the perspective. "Enabled" means the perspective is shown.
+// "Disabled" means the Perspective is hidden.
+// "AccessReview" means access review check is required to show or hide a Perspective.
+type PerspectiveState string
+
+const (
+ PerspectiveEnabled PerspectiveState = "Enabled"
+ PerspectiveDisabled PerspectiveState = "Disabled"
+ PerspectiveAccessReview PerspectiveState = "AccessReview"
+)
+
+// ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks.
+// `required` and `missing` can work together esp. in the case where the cluster admin
+ // wants to show another perspective to users without specific permissions. Out of `required` and `missing` at least one property should be non-empty.
+// +kubebuilder:validation:MinProperties:=1
+type ResourceAttributesAccessReview struct {
+ // required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list.
+ // +optional
+ Required []authorizationv1.ResourceAttributes `json:"required"`
+ // missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list.
+ // +optional
+ Missing []authorizationv1.ResourceAttributes `json:"missing"`
+}
+
+// PerspectiveVisibility defines the criteria to show/hide a perspective
+// +kubebuilder:validation:XValidation:rule="self.state == 'AccessReview' ? has(self.accessReview) : !has(self.accessReview)",message="accessReview configuration is required when state is AccessReview, and forbidden otherwise"
+// +union
+type PerspectiveVisibility struct {
+ // state defines the perspective is enabled or disabled or access review check is required.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:="Enabled";"Disabled";"AccessReview"
+ // +kubebuilder:validation:Required
+ State PerspectiveState `json:"state"`
+ // accessReview defines required and missing access review checks.
+ // +optional
+ AccessReview *ResourceAttributesAccessReview `json:"accessReview,omitempty"`
+}
+
+// Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown
+// +kubebuilder:validation:XValidation:rule="has(self.id) && self.id != 'dev'? !has(self.pinnedResources) : true",message="pinnedResources is allowed only for dev and forbidden for other perspectives"
+// +optional
+type Perspective struct {
+ // id defines the id of the perspective.
+ // Example: "dev", "admin".
+ // The available perspective ids can be found in the code snippet section next to the yaml editor.
+ // Incorrect or unknown ids will be ignored.
+ // +kubebuilder:validation:Required
+ ID string `json:"id"`
+ // visibility defines the state of perspective along with access review checks if needed for that perspective.
+ // +kubebuilder:validation:Required
+ Visibility PerspectiveVisibility `json:"visibility"`
+ // pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves.
+ // The list of available Kubernetes resources could be read via `kubectl api-resources`.
+ // The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation.
+ // Incorrect or unknown resources will be ignored.
+ // +kubebuilder:validation:MaxItems=100
+ // +optional
+ PinnedResources *[]PinnedResourceReference `json:"pinnedResources,omitempty"`
+}
+
+// PinnedResourceReference includes the group, version and type of resource
+type PinnedResourceReference struct {
+ // group is the API Group of the Resource.
+ // Enter empty string for the core group.
+ // This value should consist of only lowercase alphanumeric characters, hyphens and periods.
+ // Example: "", "apps", "build.openshift.io", etc.
+ // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+ // +kubebuilder:validation:Required
+ Group string `json:"group"`
+ // version is the API Version of the Resource.
+ // This value should consist of only lowercase alphanumeric characters.
+ // Example: "v1", "v1beta1", etc.
+ // +kubebuilder:validation:Pattern:="^[a-z0-9]+$"
+ // +kubebuilder:validation:Required
+ Version string `json:"version"`
+ // resource is the type that is being referenced.
+ // It is normally the plural form of the resource kind in lowercase.
+ // This value should consist of only lowercase alphanumeric characters and hyphens.
+ // Example: "deployments", "deploymentconfigs", "pods", etc.
+ // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+ // +kubebuilder:validation:Required
+ Resource string `json:"resource"`
+}
+
+// Brand is a specific supported brand within the console.
+type Brand string
+
+const (
+ // Legacy branding for OpenShift
+ BrandOpenShiftLegacy Brand = "openshift"
+ // Legacy branding for The Origin Community Distribution of Kubernetes
+ BrandOKDLegacy Brand = "okd"
+ // Legacy branding for OpenShift Online
+ BrandOnlineLegacy Brand = "online"
+ // Legacy branding for OpenShift Container Platform
+ BrandOCPLegacy Brand = "ocp"
+ // Legacy branding for OpenShift Dedicated
+ BrandDedicatedLegacy Brand = "dedicated"
+ // Legacy branding for Azure Red Hat OpenShift
+ BrandAzureLegacy Brand = "azure"
+ // Branding for OpenShift
+ BrandOpenShift Brand = "OpenShift"
+ // Branding for The Origin Community Distribution of Kubernetes
+ BrandOKD Brand = "OKD"
+ // Branding for OpenShift Online
+ BrandOnline Brand = "Online"
+ // Branding for OpenShift Container Platform
+ BrandOCP Brand = "OCP"
+ // Branding for OpenShift Dedicated
+ BrandDedicated Brand = "Dedicated"
+ // Branding for Azure Red Hat OpenShift
+ BrandAzure Brand = "Azure"
+ // Branding for Red Hat OpenShift Service on AWS
+ BrandROSA Brand = "ROSA"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConsoleList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Console `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
new file mode 100644
index 0000000000..349c8d461d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go
@@ -0,0 +1,334 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterCSIDriver is used to manage and configure CSI driver installed by default
+// in OpenShift. An example configuration may look like:
+// apiVersion: operator.openshift.io/v1
+// kind: "ClusterCSIDriver"
+// metadata:
+// name: "ebs.csi.aws.com"
+// spec:
+// logLevel: Debug
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clustercsidrivers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/701
+// +openshift:file-pattern=cvoRunLevel=0000_90,operatorName=csi-driver,operatorOrdering=01
+
+// ClusterCSIDriver object allows management and configuration of a CSI driver operator
+// installed by default in OpenShift. Name of the object must be name of the CSI driver
+// it operates. See CSIDriverName type for list of allowed values.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriver struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ClusterCSIDriverSpec `json:"spec"`
+
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ClusterCSIDriverStatus `json:"status"`
+}
+
+// CSIDriverName is the name of the CSI driver
+type CSIDriverName string
+
+// +kubebuilder:validation:Enum="";Managed;Unmanaged;Removed
+// StorageClassStateName defines various configuration states for storageclass management
+// and reconciliation by CSI operator.
+type StorageClassStateName string
+
+const (
+ // ManagedStorageClass means that the operator is actively managing its storage classes.
+ // Most manual changes made by cluster admin to storageclass will be wiped away by CSI
+ // operator if StorageClassState is set to Managed.
+ ManagedStorageClass StorageClassStateName = "Managed"
+ // UnmanagedStorageClass means that the operator is not actively managing storage classes.
+ // If StorageClassState is Unmanaged then CSI operator will not be actively reconciling storage class
+ // it previously created. This can be useful if cluster admin wants to modify storage class installed
+ // by CSI operator.
+ UnmanagedStorageClass StorageClassStateName = "Unmanaged"
+ // RemovedStorageClass instructs the operator to remove the storage class.
+ // If StorageClassState is Removed - CSI operator will delete storage classes it created
+ // previously. This can be useful in clusters where cluster admins want to prevent
+ // creation of dynamically provisioned volumes but still need rest of the features
+ // provided by CSI operator and driver.
+ RemovedStorageClass StorageClassStateName = "Removed"
+)
+
+// If you are adding a new driver name here, ensure that 0000_90_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name.
+const (
+ AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com"
+ AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com"
+ AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com"
+ AzureFileCSIDriver CSIDriverName = "file.csi.azure.com"
+ GCPFilestoreCSIDriver CSIDriverName = "filestore.csi.storage.gke.io"
+ GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io"
+ CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org"
+ VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com"
+ ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org"
+ OvirtCSIDriver CSIDriverName = "csi.ovirt.org"
+ KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io"
+ SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io"
+ AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com"
+ IBMVPCBlockCSIDriver CSIDriverName = "vpc.block.csi.ibm.io"
+ IBMPowerVSBlockCSIDriver CSIDriverName = "powervs.csi.ibm.com"
+ SecretsStoreCSIDriver CSIDriverName = "secrets-store.csi.k8s.io"
+ SambaCSIDriver CSIDriverName = "smb.csi.k8s.io"
+)
+
+// ClusterCSIDriverSpec is the desired behavior of CSI driver operator
+type ClusterCSIDriverSpec struct {
+ OperatorSpec `json:",inline"`
+ // StorageClassState determines if CSI operator should create and manage storage classes.
+ // If this field value is empty or Managed - CSI operator will continuously reconcile
+ // storage class and create if necessary.
+ // If this field value is Unmanaged - CSI operator will not reconcile any previously created
+ // storage class.
+ // If this field value is Removed - CSI operator will delete the storage class it created previously.
+ // When omitted, this means the user has no opinion and the platform chooses a reasonable default,
+ // which is subject to change over time.
+ // The current default behaviour is Managed.
+ // +optional
+ StorageClassState StorageClassStateName `json:"storageClassState,omitempty"`
+
+ // driverConfig can be used to specify platform specific driver configuration.
+ // When omitted, this means no opinion and the platform is left to choose reasonable
+ // defaults. These defaults are subject to change over time.
+ // +optional
+ DriverConfig CSIDriverConfigSpec `json:"driverConfig"`
+}
+
+// CSIDriverType indicates type of CSI driver being configured.
+// +kubebuilder:validation:Enum="";AWS;Azure;GCP;IBMCloud;vSphere
+type CSIDriverType string
+
+const (
+ AWSDriverType CSIDriverType = "AWS"
+ AzureDriverType CSIDriverType = "Azure"
+ GCPDriverType CSIDriverType = "GCP"
+ IBMCloudDriverType CSIDriverType = "IBMCloud"
+ VSphereDriverType CSIDriverType = "vSphere"
+)
+
+// CSIDriverConfigSpec defines configuration spec that can be
+// used to optionally configure a specific CSI Driver.
+// +kubebuilder:validation:XValidation:rule="has(self.driverType) && self.driverType == 'IBMCloud' ? has(self.ibmcloud) : !has(self.ibmcloud)",message="ibmcloud must be set if driverType is 'IBMCloud', but remain unset otherwise"
+// +union
+type CSIDriverConfigSpec struct {
+ // driverType indicates type of CSI driver for which the
+ // driverConfig is being applied to.
+ // Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted.
+ // Consumers should treat unknown values as a NO-OP.
+ // +kubebuilder:validation:Required
+ // +unionDiscriminator
+ DriverType CSIDriverType `json:"driverType"`
+
+ // aws is used to configure the AWS CSI driver.
+ // +optional
+ AWS *AWSCSIDriverConfigSpec `json:"aws,omitempty"`
+
+ // azure is used to configure the Azure CSI driver.
+ // +optional
+ Azure *AzureCSIDriverConfigSpec `json:"azure,omitempty"`
+
+ // gcp is used to configure the GCP CSI driver.
+ // +optional
+ GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"`
+
+ // ibmcloud is used to configure the IBM Cloud CSI driver.
+ // +optional
+ IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"`
+
+ // vsphere is used to configure the vsphere CSI driver.
+ // +optional
+ VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"`
+}
+
+// AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.
+type AWSCSIDriverConfigSpec struct {
+ // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key,
+ // rather than the default KMS key used by AWS.
+ // The value may be either the ARN or Alias ARN of a KMS key.
+ // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$`
+ // +optional
+ KMSKeyARN string `json:"kmsKeyARN,omitempty"`
+}
+
+// AzureDiskEncryptionSet defines the configuration for a disk encryption set.
+type AzureDiskEncryptionSet struct {
+ // subscriptionID defines the Azure subscription that contains the disk encryption set.
+ // The value should meet the following conditions:
+ // 1. It should be a 128-bit number.
+ // 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long.
+ // 3. It should be displayed in five groups separated by hyphens (-).
+ // 4. The first group should be 8 characters long.
+ // 5. The second, third, and fourth groups should be 4 characters long.
+ // 6. The fifth group should be 12 characters long.
+ // An example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=36
+ // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`
+ SubscriptionID string `json:"subscriptionID"`
+
+ // resourceGroup defines the Azure resource group that contains the disk encryption set.
+ // The value should consist of only alphanumeric characters,
+ // underscores (_), parentheses, hyphens and periods.
+ // The value should not end in a period and be at most 90 characters in
+ // length.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=90
+ // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$`
+ ResourceGroup string `json:"resourceGroup"`
+
+ // name is the name of the disk encryption set that will be set on the default storage class.
+ // The value should consist of only alphanumeric characters,
+ // underscores (_), hyphens, and be at most 80 characters in length.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=80
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ Name string `json:"name"`
+}
+
+// AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.
+type AzureCSIDriverConfigSpec struct {
+ // diskEncryptionSet sets the cluster default storage class to encrypt volumes with a
+ // customer-managed encryption set, rather than the default platform-managed keys.
+ // +optional
+ DiskEncryptionSet *AzureDiskEncryptionSet `json:"diskEncryptionSet,omitempty"`
+}
+
+// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key
+type GCPKMSKeyReference struct {
+ // name is the name of the customer-managed encryption key to be used for disk encryption.
+ // The value should correspond to an existing KMS key and should
+ // consist of only alphanumeric characters, hyphens (-) and underscores (_),
+ // and be at most 63 characters in length.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +kubebuilder:validation:MinLength:=1
+ // +kubebuilder:validation:MaxLength:=63
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // keyRing is the name of the KMS Key Ring which the KMS Key belongs to.
+ // The value should correspond to an existing KMS key ring and should
+ // consist of only alphanumeric characters, hyphens (-) and underscores (_),
+ // and be at most 63 characters in length.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +kubebuilder:validation:MinLength:=1
+ // +kubebuilder:validation:MaxLength:=63
+ // +kubebuilder:validation:Required
+ KeyRing string `json:"keyRing"`
+
+ // projectID is the ID of the Project in which the KMS Key Ring exists.
+ // It must be 6 to 30 lowercase letters, digits, or hyphens.
+ // It must start with a letter. Trailing hyphens are prohibited.
+ // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$`
+ // +kubebuilder:validation:MinLength:=6
+ // +kubebuilder:validation:MaxLength:=30
+ // +kubebuilder:validation:Required
+ ProjectID string `json:"projectID"`
+
+ // location is the GCP location in which the Key Ring exists.
+ // The value must match an existing GCP location, or "global".
+ // Defaults to global, if not set.
+ // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$`
+ // +optional
+ Location string `json:"location,omitempty"`
+}
+
+// GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.
+type GCPCSIDriverConfigSpec struct {
+ // kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied
+ // encryption keys, rather than the default keys managed by GCP.
+ // +optional
+ KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"`
+}
+
+// IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.
+type IBMCloudCSIDriverConfigSpec struct {
+ // encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use
+ // for disk encryption of volumes for the default storage classes.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength:=154
+ // +kubebuilder:validation:MinLength:=144
+ // +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$`
+ EncryptionKeyCRN string `json:"encryptionKeyCRN"`
+}
+
+// VSphereCSIDriverConfigSpec defines properties that
+// can be configured for vsphere CSI driver.
+type VSphereCSIDriverConfigSpec struct {
+ // topologyCategories indicates tag categories with which
+ // vcenter resources such as hostcluster or datacenter were tagged with.
+ // If cluster Infrastructure object has a topology, values specified in
+ // Infrastructure object will be used and modifications to topologyCategories
+ // will be rejected.
+ // +optional
+ TopologyCategories []string `json:"topologyCategories,omitempty"`
+
+ // globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of
+ // datastores. If omitted, the platform chooses a default, which is subject to change over time, currently that default is 3.
+ // Snapshots can not be disabled using this parameter.
+ // Increasing number of snapshots above 3 can have negative impact on performance, for more details see: https://kb.vmware.com/s/article/1025279
+ // Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"`
+
+ // granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It
+ // overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+ // Snapshots for VSAN can not be disabled using this parameter.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"`
+
+ // granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only.
+ // It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset.
+ // Snapshots for VVOL can not be disabled using this parameter.
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +openshift:enable:FeatureGate=VSphereDriverConfiguration
+ // +optional
+ GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"`
+}
+
+// ClusterCSIDriverStatus is the observed status of CSI driver operator
+type ClusterCSIDriverStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterCSIDriverList contains a list of ClusterCSIDriver
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterCSIDriverList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []ClusterCSIDriver `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go
new file mode 100644
index 0000000000..f96384819c
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go
@@ -0,0 +1,61 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=csisnapshotcontrollers,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/562
+// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=csi-snapshot-controller,operatorOrdering=01
+
+// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CSISnapshotController struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec CSISnapshotControllerSpec `json:"spec"`
+
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status CSISnapshotControllerStatus `json:"status"`
+}
+
+// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.
+type CSISnapshotControllerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.
+type CSISnapshotControllerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CSISnapshotControllerList contains a list of CSISnapshotControllers.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type CSISnapshotControllerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []CSISnapshotController `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go
new file mode 100644
index 0000000000..3d7cbb6c00
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go
@@ -0,0 +1,530 @@
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=dnses,scope=Cluster
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=dns,operatorOrdering=00
+
+// DNS manages the CoreDNS component to provide a name resolution service
+// for pods and services in the cluster.
+//
+// This supports the DNS-based service discovery specification:
+// https://github.com/kubernetes/dns/blob/master/docs/specification.md
+//
+// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNS struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the DNS.
+ Spec DNSSpec `json:"spec,omitempty"`
+ // status is the most recently observed status of the DNS.
+ Status DNSStatus `json:"status,omitempty"`
+}
+
+// DNSSpec is the specification of the desired behavior of the DNS.
+type DNSSpec struct {
+ // servers is a list of DNS resolvers that provide name query delegation for one or
+ // more subdomains outside the scope of the cluster domain. If servers consists of
+ // more than one Server, longest suffix match will be used to determine the Server.
+ //
+ // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com",
+ // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone
+ // "a.foo.com".
+ //
+ // If this field is nil, no servers are created.
+ //
+ // +optional
+ Servers []Server `json:"servers,omitempty"`
+
+ // upstreamResolvers defines a schema for configuring CoreDNS
+ // to proxy DNS messages to upstream resolvers for the case of the
+ // default (".") server
+ //
+ // If this field is not specified, the upstream used will default to
+ // /etc/resolv.conf, with policy "sequential"
+ //
+ // +optional
+ UpstreamResolvers UpstreamResolvers `json:"upstreamResolvers"`
+
+ // nodePlacement provides explicit control over the scheduling of DNS
+ // pods.
+ //
+ // Generally, it is useful to run a DNS pod on every node so that DNS
+ // queries are always handled by a local DNS pod instead of going over
+ // the network to a DNS pod on another node. However, security policies
+ // may require restricting the placement of DNS pods to specific nodes.
+ // For example, if a security policy prohibits pods on arbitrary nodes
+ // from communicating with the API, a node selector can be specified to
+ // restrict DNS pods to nodes that are permitted to communicate with the
+ // API. Conversely, if running DNS pods on nodes with a particular
+ // taint is desired, a toleration can be specified for that taint.
+ //
+ // If unset, defaults are used. See nodePlacement for more details.
+ //
+ // +optional
+ NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"`
+
+ // managementState indicates whether the DNS operator should manage cluster
+ // DNS
+ // +optional
+ ManagementState ManagementState `json:"managementState,omitempty"`
+
+ // operatorLogLevel controls the logging level of the DNS Operator.
+ // Valid values are: "Normal", "Debug", "Trace".
+ // Defaults to "Normal".
+ // setting operatorLogLevel: Trace will produce extremely verbose logs.
+ // +optional
+ // +kubebuilder:default=Normal
+ OperatorLogLevel DNSLogLevel `json:"operatorLogLevel,omitempty"`
+
+ // logLevel describes the desired logging verbosity for CoreDNS.
+ // Any one of the following values may be specified:
+ // * Normal logs errors from upstream resolvers.
+ // * Debug logs errors, NXDOMAIN responses, and NODATA responses.
+ // * Trace logs errors and all responses.
+ // Setting logLevel: Trace will produce extremely verbose logs.
+ // Valid values are: "Normal", "Debug", "Trace".
+ // Defaults to "Normal".
+ // +optional
+ // +kubebuilder:default=Normal
+ LogLevel DNSLogLevel `json:"logLevel,omitempty"`
+
+ // cache describes the caching configuration that applies to all server blocks listed in the Corefile.
+ // This field allows a cluster admin to optionally configure:
+ // * positiveTTL which is a duration for which positive responses should be cached.
+ // * negativeTTL which is a duration for which negative responses should be cached.
+ // If this is not configured, OpenShift will configure positive and negative caching with a default value that is
+ // subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is
+ // 30 seconds or as noted in the respective Corefile for your version of OpenShift.
+ // +optional
+ Cache DNSCache `json:"cache,omitempty"`
+}
+
+// DNSCache defines the fields for configuring DNS caching.
+type DNSCache struct {
+ // positiveTTL is optional and specifies the amount of time that a positive response should be cached.
+ //
+ // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This
+ // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix,
+ // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second.
+ // If the configured value is less than 1s, the default value will be used.
+ // If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted
+ // otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject
+ // to change.
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ PositiveTTL metav1.Duration `json:"positiveTTL,omitempty"`
+
+ // negativeTTL is optional and specifies the amount of time that a negative response should be cached.
+ //
+ // If configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This
+ // field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix,
+ // e.g. "100s", "1m30s", "12h30m10s". Values that are fractions of a second are rounded down to the nearest second.
+ // If the configured value is less than 1s, the default value will be used.
+ // If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted
+ // otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject
+ // to change.
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ NegativeTTL metav1.Duration `json:"negativeTTL,omitempty"`
+}
+
+// +kubebuilder:validation:Enum:=Normal;Debug;Trace
+type DNSLogLevel string
+
+var (
+ // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2.
+ DNSLogLevelNormal DNSLogLevel = "Normal"
+
+ // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4.
+ DNSLogLevelDebug DNSLogLevel = "Debug"
+
+ // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6.
+ DNSLogLevelTrace DNSLogLevel = "Trace"
+)
+
+// Server defines the schema for a server that runs per instance of CoreDNS.
+type Server struct {
+ // name is required and specifies a unique name for the server. Name must comply
+ // with the Service Name Syntax of rfc6335.
+ Name string `json:"name"`
+ // zones is required and specifies the subdomains that Server is authoritative for.
+ // Zones must conform to the rfc1123 definition of a subdomain. Specifying the
+ // cluster domain (i.e., "cluster.local") is invalid.
+ Zones []string `json:"zones"`
+ // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages
+ // to upstream resolvers.
+ ForwardPlugin ForwardPlugin `json:"forwardPlugin"`
+}
+
+// DNSTransport indicates what type of connection should be used.
+// +kubebuilder:validation:Enum=TLS;Cleartext;""
+type DNSTransport string
+
+const (
+ // TLSTransport indicates that TLS should be used for the connection.
+ TLSTransport DNSTransport = "TLS"
+
+ // CleartextTransport indicates that no encryption should be used for
+ // the connection.
+ CleartextTransport DNSTransport = "Cleartext"
+)
+
+// DNSTransportConfig groups related configuration parameters used for configuring
+// forwarding to upstream resolvers that support DNS-over-TLS.
+// +union
+type DNSTransportConfig struct {
+ // transport allows cluster administrators to opt-in to using a DNS-over-TLS
+ // connection between cluster DNS and an upstream resolver(s). Configuring
+ // TLS as the transport at this level without configuring a CABundle will
+ // result in the system certificates being used to verify the serving
+ // certificate of the upstream resolver(s).
+ //
+ // Possible values:
+ // "" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject
+ // to change over time. The current default is "Cleartext".
+ // "Cleartext" - Cluster admin specified cleartext option. This results in the same functionality
+ // as an empty value but may be useful when a cluster admin wants to be more explicit about the transport,
+ // or wants to switch from "TLS" to "Cleartext" explicitly.
+ // "TLS" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS,
+ // you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default
+ // per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1.
+ //
+ // +optional
+ // +unionDiscriminator
+ Transport DNSTransport `json:"transport,omitempty"`
+
+ // tls contains the additional configuration options to use when Transport is set to "TLS".
+ TLS *DNSOverTLSConfig `json:"tls,omitempty"`
+}
+
+// DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.
+type DNSOverTLSConfig struct {
+ // serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is
+ // set to "TLS". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the
+ // TLS certificate installed in the upstream resolver(s).
+ //
+ // + ---
+ // + Inspired by the DNS1123 patterns in Kubernetes: https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+ ServerName string `json:"serverName"`
+
+ // caBundle references a ConfigMap that must contain either a single
+ // CA Certificate or a CA Bundle. This allows cluster administrators to provide their
+ // own CA or CA bundle for validating the certificate of upstream resolvers.
+ //
+ // 1. The configmap must contain a `ca-bundle.crt` key.
+ // 2. The value must be a PEM encoded CA certificate or CA bundle.
+ // 3. The administrator must create this configmap in the openshift-config namespace.
+ // 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName.
+ //
+ // +optional
+ CABundle v1.ConfigMapNameReference `json:"caBundle,omitempty"`
+}
+
+// ForwardingPolicy is the policy to use when forwarding DNS requests.
+// +kubebuilder:validation:Enum=Random;RoundRobin;Sequential
+type ForwardingPolicy string
+
+const (
+ // RandomForwardingPolicy picks a random upstream server for each query.
+ RandomForwardingPolicy ForwardingPolicy = "Random"
+
+ // RoundRobinForwardingPolicy picks upstream servers in a round-robin order, moving to the next server for each new query.
+ RoundRobinForwardingPolicy ForwardingPolicy = "RoundRobin"
+
+ // SequentialForwardingPolicy tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
+ SequentialForwardingPolicy ForwardingPolicy = "Sequential"
+)
+
+// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.
+type ForwardPlugin struct {
+ // upstreams is a list of resolvers to forward name queries for subdomains of Zones.
+ // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
+ // returns an error during the exchange, another resolver is tried from Upstreams. The
+ // Upstreams are selected in the order specified in Policy. Each upstream is represented
+ // by an IP address or IP:port if the upstream listens on a port other than 53.
+ //
+ // A maximum of 15 upstreams is allowed per ForwardPlugin.
+ //
+ // +kubebuilder:validation:MaxItems=15
+ Upstreams []string `json:"upstreams"`
+
+ // policy is used to determine the order in which upstream servers are selected for querying.
+ // Any one of the following values may be specified:
+ //
+ // * "Random" picks a random upstream server for each query.
+ // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
+ // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
+ //
+ // The default value is "Random"
+ //
+ // +optional
+ // +kubebuilder:default:="Random"
+ Policy ForwardingPolicy `json:"policy,omitempty"`
+
+ // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
+ // when forwarding DNS requests to an upstream resolver.
+ //
+ // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
+ // requests to an upstream resolver.
+ //
+ // +optional
+ TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
+
+ // protocolStrategy specifies the protocol to use for upstream DNS
+ // requests.
+ // Valid values for protocolStrategy are "TCP" and omitted.
+ // When omitted, this means no opinion and the platform is left to choose
+ // a reasonable default, which is subject to change over time.
+ // The current default is to use the protocol of the original client request.
+ // "TCP" specifies that the platform should use TCP for all upstream DNS requests,
+ // even if the client request uses UDP.
+ // "TCP" is useful for UDP-specific issues such as those created by
+ // non-compliant upstream resolvers, but may consume more bandwidth or
+ // increase DNS response time. Note that protocolStrategy only affects
+ // the protocol of DNS requests that CoreDNS makes to upstream resolvers.
+ // It does not affect the protocol of DNS requests between clients and
+ // CoreDNS.
+ //
+ // +optional
+ ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"`
+}
+
+// UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the
+// specific case of the default (".") server.
+// It differs from ForwardPlugin in the default values it accepts:
+// * At least one upstream should be specified.
+// * the default policy is Sequential
+type UpstreamResolvers struct {
+ // Upstreams is a list of resolvers to forward name queries for the "." domain.
+ // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream
+ // returns an error during the exchange, another resolver is tried from Upstreams. The
+ // Upstreams are selected in the order specified in Policy.
+ //
+ // A maximum of 15 upstreams is allowed per ForwardPlugin.
+ // If no Upstreams are specified, /etc/resolv.conf is used by default
+ //
+ // +optional
+ // +kubebuilder:validation:MaxItems=15
+ // +kubebuilder:default={{"type":"SystemResolvConf"}}
+ Upstreams []Upstream `json:"upstreams"`
+
+ // Policy is used to determine the order in which upstream servers are selected for querying.
+ // Any one of the following values may be specified:
+ //
+ // * "Random" picks a random upstream server for each query.
+ // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query.
+ // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.
+ //
+ // The default value is "Sequential"
+ //
+ // +optional
+ // +kubebuilder:default="Sequential"
+ Policy ForwardingPolicy `json:"policy,omitempty"`
+
+ // transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use
+ // when forwarding DNS requests to an upstream resolver.
+ //
+ // The default value is "" (empty) which results in a standard cleartext connection being used when forwarding DNS
+ // requests to an upstream resolver.
+ //
+ // +optional
+ TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"`
+
+ // protocolStrategy specifies the protocol to use for upstream DNS
+ // requests.
+ // Valid values for protocolStrategy are "TCP" and omitted.
+ // When omitted, this means no opinion and the platform is left to choose
+ // a reasonable default, which is subject to change over time.
+ // The current default is to use the protocol of the original client request.
+ // "TCP" specifies that the platform should use TCP for all upstream DNS requests,
+ // even if the client request uses UDP.
+ // "TCP" is useful for UDP-specific issues such as those created by
+ // non-compliant upstream resolvers, but may consume more bandwidth or
+ // increase DNS response time. Note that protocolStrategy only affects
+ // the protocol of DNS requests that CoreDNS makes to upstream resolvers.
+ // It does not affect the protocol of DNS requests between clients and
+ // CoreDNS.
+ //
+ // +optional
+ ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"`
+}
+
+// Upstream can either be of type SystemResolvConf, or of type Network.
+//
+// - For an Upstream of type SystemResolvConf, no further fields are necessary:
+// The upstream will be configured to use /etc/resolv.conf.
+// - For an Upstream of type Network, a NetworkResolver field needs to be defined
+// with an IP address or IP:port if the upstream listens on a port other than 53.
+type Upstream struct {
+
+ // Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf.
+ // Type accepts 2 possible values: SystemResolvConf or Network.
+ //
+ // * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:
+ // /etc/resolv.conf will be used
+ // * When Network is used, the Upstream structure must contain at least an Address
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Type UpstreamType `json:"type"`
+
+ // Address must be defined when Type is set to Network. It will be ignored otherwise.
+ // It must be a valid ipv4 or ipv6 address.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ Address string `json:"address,omitempty"`
+
+ // Port may be defined when Type is set to Network. It will be ignored otherwise.
+ // Port must be between 1 and 65535.
+ //
+ // +optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default=53
+ Port uint32 `json:"port,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=SystemResolvConf;Network;""
+type UpstreamType string
+
+const (
+ SystemResolveConfType UpstreamType = "SystemResolvConf"
+ NetworkResolverType UpstreamType = "Network"
+)
+
+// ProtocolStrategy is a preference for the protocol to use for DNS queries.
+// + ---
+// + When consumers observe an unknown value, they should use the default strategy.
+// +kubebuilder:validation:Enum:=TCP;""
+type ProtocolStrategy string
+
+var (
+ // ProtocolStrategyDefault specifies no opinion for DNS protocol.
+ // If empty, the default behavior of CoreDNS is used. Currently, this means that CoreDNS uses the protocol of the
+ // originating client request as the upstream protocol.
+ // Note that the default behavior of CoreDNS is subject to change.
+ ProtocolStrategyDefault ProtocolStrategy = ""
+
+ // ProtocolStrategyTCP instructs CoreDNS to always use TCP, regardless of the originating client's request protocol.
+ ProtocolStrategyTCP ProtocolStrategy = "TCP"
+)
+
+// DNSNodePlacement describes the node scheduling configuration for DNS pods.
+type DNSNodePlacement struct {
+ // nodeSelector is the node selector applied to DNS pods.
+ //
+ // If empty, the default is used, which is currently the following:
+ //
+ // kubernetes.io/os: linux
+ //
+ // This default is subject to change.
+ //
+ // If set, the specified selector is used and replaces the default.
+ //
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+ // tolerations is a list of tolerations applied to DNS pods.
+ //
+ // If empty, the DNS operator sets a toleration for the
+ // "node-role.kubernetes.io/master" taint. This default is subject to
+ // change. Specifying tolerations without including a toleration for
+ // the "node-role.kubernetes.io/master" taint may be risky as it could
+ // lead to an outage if all worker nodes become unavailable.
+ //
+ // Note that the daemon controller adds some tolerations as well. See
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ //
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+const (
+ // Available indicates the DNS controller daemonset is available.
+ DNSAvailable = "Available"
+)
+
+// DNSStatus defines the observed status of the DNS.
+type DNSStatus struct {
+ // clusterIP is the service IP through which this DNS is made available.
+ //
+ // In the case of the default DNS, this will be a well known IP that is used
+ // as the default nameserver for pods that are using the default ClusterFirst DNS policy.
+ //
+ // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list
+ // or used explicitly when performing name resolution from within the cluster.
+ // Example: dig foo.com @&lt;service IP&gt;
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ ClusterIP string `json:"clusterIP"`
+
+ // clusterDomain is the local cluster DNS domain suffix for DNS services.
+ // This will be a subdomain as defined in RFC 1034,
+ // section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5
+ // Example: "cluster.local"
+ //
+ // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ ClusterDomain string `json:"clusterDomain"`
+
+ // conditions provide information about the state of the DNS on the cluster.
+ //
+ // These are the supported DNS conditions:
+ //
+ // * Available
+ // - True if the following conditions are met:
+ // * DNS controller daemonset is available.
+ // - False if any of those conditions are unsatisfied.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DNSList contains a list of DNS
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []DNS `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
new file mode 100644
index 0000000000..71345d7d78
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -0,0 +1,98 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=etcds,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/752
+// +openshift:file-pattern=cvoRunLevel=0000_12,operatorName=etcd,operatorOrdering=01
+
+// Etcd provides information to configure an operator to manage etcd.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Etcd struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec EtcdSpec `json:"spec"`
+ // +optional
+ Status EtcdStatus `json:"status"`
+}
+
+type EtcdSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+ // HardwareSpeed allows user to change the etcd tuning profile which configures
+ // the latency parameters for heartbeat interval and leader election timeouts
+ // allowing the cluster to tolerate longer round-trip-times between etcd members.
+ // Valid values are "", "Standard" and "Slower".
+ // "" means no opinion and the platform is left to choose a reasonable default
+ // which is subject to change without notice.
+ // +kubebuilder:validation:Optional
+ // +openshift:enable:FeatureGate=HardwareSpeed
+ // +optional
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+
+ // backendQuotaGiB sets the etcd backend storage size limit in gibibytes.
+ // The value should be an integer not less than 8 and not more than 32.
+ // When not specified, the default value is 8.
+ // +kubebuilder:default:=8
+ // +kubebuilder:validation:Minimum=8
+ // +kubebuilder:validation:Maximum=32
+ // +kubebuilder:validation:XValidation:rule="self>=oldSelf",message="etcd backendQuotaGiB may not be decreased"
+ // +openshift:enable:FeatureGate=EtcdBackendQuota
+ // +default=8
+ // +optional
+ BackendQuotaGiB int32 `json:"backendQuotaGiB,omitempty"`
+}
+
+type EtcdStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+ HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"`
+}
+
+const (
+ // StandardHardwareSpeed provides the normal tolerances for hardware speed and latency.
+ // Currently sets (values subject to change at any time):
+ // ETCD_HEARTBEAT_INTERVAL: 100ms
+ // ETCD_LEADER_ELECTION_TIMEOUT: 1000ms
+ StandardHardwareSpeed ControlPlaneHardwareSpeed = "Standard"
+ // SlowerHardwareSpeed provides more tolerance for slower hardware and/or higher latency networks.
+ // Sets (values subject to change):
+ // ETCD_HEARTBEAT_INTERVAL: 5x Standard
+ // ETCD_LEADER_ELECTION_TIMEOUT: 2.5x Standard
+ SlowerHardwareSpeed ControlPlaneHardwareSpeed = "Slower"
+)
+
+// ControlPlaneHardwareSpeed declares valid hardware speed tolerance levels
+// +enum
+// +kubebuilder:validation:Enum:="";Standard;Slower
+type ControlPlaneHardwareSpeed string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EtcdList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EtcdList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []Etcd `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go
new file mode 100644
index 0000000000..64419ddfc0
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go
@@ -0,0 +1,1901 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ corev1 "k8s.io/api/core/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector
+// +kubebuilder:resource:path=ingresscontrollers,scope=Namespaced
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/616
+// +openshift:capability=Ingress
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=ingress,operatorOrdering=00
+
+// IngressController describes a managed ingress controller for the cluster. The
+// controller can service OpenShift Route and Kubernetes Ingress resources.
+//
+// When an IngressController is created, a new ingress controller deployment is
+// created to allow external traffic to reach the services that expose Ingress
+// or Route resources. Updating this resource may lead to disruption for public
+// facing network connections as a new ingress controller revision may be rolled
+// out.
+//
+// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+//
+// Whenever possible, sensible defaults for the platform are used. See each
+// field for more details.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IngressController struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired behavior of the IngressController.
+ Spec IngressControllerSpec `json:"spec,omitempty"`
+ // status is the most recently observed status of the IngressController.
+ Status IngressControllerStatus `json:"status,omitempty"`
+}
+
+// IngressControllerSpec is the specification of the desired behavior of the
+// IngressController.
+type IngressControllerSpec struct {
+ // domain is a DNS name serviced by the ingress controller and is used to
+ // configure multiple features:
+ //
+ // * For the LoadBalancerService endpoint publishing strategy, domain is
+ // used to configure DNS records. See endpointPublishingStrategy.
+ //
+ // * When using a generated default certificate, the certificate will be valid
+ // for domain and its subdomains. See defaultCertificate.
+ //
+ // * The value is published to individual Route statuses so that end-users
+ // know where to target external DNS records.
+ //
+ // domain must be unique among all IngressControllers, and cannot be
+ // updated.
+ //
+ // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain.
+ //
+ // +optional
+ Domain string `json:"domain,omitempty"`
+
+ // httpErrorCodePages specifies a configmap with custom error pages.
+ // The administrator must create this configmap in the openshift-config namespace.
+ // This configmap should have keys in the format "error-page-<error code>.http",
+ // where <error code> is an HTTP error code.
+ // For example, "error-page-503.http" defines an error page for HTTP 503 responses.
+ // Currently only error pages for 503 and 404 responses can be customized.
+ // Each value in the configmap should be the full response, including HTTP headers.
+ // Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
+ // If this field is empty, the ingress controller uses the default error pages.
+ HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
+
+ // replicas is the desired number of ingress controller replicas. If unset,
+ // the default depends on the value of the defaultPlacement field in the
+ // cluster config.openshift.io/v1/ingresses status.
+ //
+ // The value of replicas is set based on the value of a chosen field in the
+ // Infrastructure CR. If defaultPlacement is set to ControlPlane, the
+ // chosen field will be controlPlaneTopology. If it is set to Workers the
+ // chosen field will be infrastructureTopology. Replicas will then be set to 1
+ // or 2 based whether the chosen field's value is SingleReplica or
+ // HighlyAvailable, respectively.
+ //
+ // These defaults are subject to change.
+ //
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty"`
+
+ // endpointPublishingStrategy is used to publish the ingress controller
+ // endpoints to other networks, enable load balancer integrations, etc.
+ //
+ // If unset, the default is based on
+ // infrastructure.config.openshift.io/cluster .status.platform:
+ //
+ // AWS: LoadBalancerService (with External scope)
+ // Azure: LoadBalancerService (with External scope)
+ // GCP: LoadBalancerService (with External scope)
+ // IBMCloud: LoadBalancerService (with External scope)
+ // AlibabaCloud: LoadBalancerService (with External scope)
+ // Libvirt: HostNetwork
+ //
+ // Any other platform types (including None) default to HostNetwork.
+ //
+ // endpointPublishingStrategy cannot be updated.
+ //
+ // +optional
+ EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
+
+ // defaultCertificate is a reference to a secret containing the default
+ // certificate served by the ingress controller. When Routes don't specify
+ // their own certificate, defaultCertificate is used.
+ //
+ // The secret must contain the following keys and data:
+ //
+ // tls.crt: certificate file contents
+ // tls.key: key file contents
+ //
+ // If unset, a wildcard certificate is automatically generated and used. The
+ // certificate is valid for the ingress controller domain (and subdomains) and
+ // the generated certificate's CA will be automatically integrated with the
+ // cluster's trust store.
+ //
+ // If a wildcard certificate is used and shared by multiple
+ // HTTP/2 enabled routes (which implies ALPN) then clients
+ // (i.e., notably browsers) are at liberty to reuse open
+ // connections. This means a client can reuse a connection to
+ // another route and that is likely to fail. This behaviour is
+ // generally known as connection coalescing.
+ //
+ // The in-use certificate (whether generated or user-specified) will be
+ // automatically integrated with OpenShift's built-in OAuth server.
+ //
+ // +optional
+ DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"`
+
+ // namespaceSelector is used to filter the set of namespaces serviced by the
+ // ingress controller. This is useful for implementing shards.
+ //
+ // If unset, the default is no filtering.
+ //
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // routeSelector is used to filter the set of Routes serviced by the ingress
+ // controller. This is useful for implementing shards.
+ //
+ // If unset, the default is no filtering.
+ //
+ // +optional
+ RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"`
+
+ // nodePlacement enables explicit control over the scheduling of the ingress
+ // controller.
+ //
+ // If unset, defaults are used. See NodePlacement for more details.
+ //
+ // +optional
+ NodePlacement *NodePlacement `json:"nodePlacement,omitempty"`
+
+ // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.
+ //
+ // If unset, the default is based on the apiservers.config.openshift.io/cluster resource.
+ //
+ // Note that when using the Old, Intermediate, and Modern profile types, the effective
+ // profile configuration is subject to change between releases. For example, given
+ // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade
+ // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress
+ // controller, resulting in a rollout.
+ //
+ // +optional
+ TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
+
+ // clientTLS specifies settings for requesting and verifying client
+ // certificates, which can be used to enable mutual TLS for
+ // edge-terminated and reencrypt routes.
+ //
+ // +optional
+ ClientTLS ClientTLS `json:"clientTLS"`
+
+ // routeAdmission defines a policy for handling new route claims (for example,
+ // to allow or deny claims across namespaces).
+ //
+ // If empty, defaults will be applied. See specific routeAdmission fields
+ // for details about their defaults.
+ //
+ // +optional
+ RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"`
+
+ // logging defines parameters for what should be logged where. If this
+ // field is empty, operational logs are enabled but access logs are
+ // disabled.
+ //
+ // +optional
+ Logging *IngressControllerLogging `json:"logging,omitempty"`
+
+ // httpHeaders defines policy for HTTP headers.
+ //
+ // If this field is empty, the default values are used.
+ //
+ // +optional
+ HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"`
+
+ // httpEmptyRequestsPolicy describes how HTTP connections should be
+ // handled if the connection times out before a request is received.
+ // Allowed values for this field are "Respond" and "Ignore". If the
+ // field is set to "Respond", the ingress controller sends an HTTP 400
+ // or 408 response, logs the connection (if access logging is enabled),
+ // and counts the connection in the appropriate metrics. If the field
+ // is set to "Ignore", the ingress controller closes the connection
+ // without sending a response, logging the connection, or incrementing
+ // metrics. The default value is "Respond".
+ //
+ // Typically, these connections come from load balancers' health probes
+ // or Web browsers' speculative connections ("preconnect") and can be
+ // safely ignored. However, these requests may also be caused by
+ // network errors, and so setting this field to "Ignore" may impede
+ // detection and diagnosis of problems. In addition, these requests may
+ // be caused by port scans, in which case logging empty requests may aid
+ // in detecting intrusion attempts.
+ //
+ // +optional
+ // +kubebuilder:default:="Respond"
+ HTTPEmptyRequestsPolicy HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"`
+
+ // tuningOptions defines parameters for adjusting the performance of
+ // ingress controller pods. All fields are optional and will use their
+ // respective defaults if not set. See specific tuningOptions fields for
+ // more details.
+ //
+ // Setting fields within tuningOptions is generally not recommended. The
+ // default values are suitable for most configurations.
+ //
+ // +optional
+ TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"`
+
+ // unsupportedConfigOverrides allows specifying unsupported
+ // configuration options. Its use is unsupported.
+ //
+ // +optional
+ // +nullable
+ // +kubebuilder:pruning:PreserveUnknownFields
+ UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"`
+
+ // httpCompression defines a policy for HTTP traffic compression.
+ // By default, there is no HTTP compression.
+ //
+ // +optional
+ HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"`
+}
+
+// httpCompressionPolicy turns on compression for the specified MIME types.
+//
+// This field is optional, and its absence implies that compression should not be enabled
+// globally in HAProxy.
+//
+// If httpCompressionPolicy exists, compression should be enabled only for the specified
+// MIME types.
+type HTTPCompressionPolicy struct {
+ // mimeTypes is a list of MIME types that should have compression applied.
+ // This list can be empty, in which case the ingress controller does not apply compression.
+ //
+ // Note: Not all MIME types benefit from compression, but HAProxy will still use resources
+ // to try to compress if instructed to. Generally speaking, text (html, css, js, etc.)
+ // formats benefit from compression, but formats that are already compressed (image,
+ // audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing
+ // again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2
+ //
+ // +listType=set
+ MimeTypes []CompressionMIMEType `json:"mimeTypes,omitempty"`
+}
+
+// CompressionMIMEType defines the format of a single MIME type.
+// E.g. "text/css; charset=utf-8", "text/html", "text/*", "image/svg+xml",
+// "application/octet-stream", "X-custom/customsub", etc.
+//
+// The format should follow the Content-Type definition in RFC 1341:
+// Content-Type := type "/" subtype *[";" parameter]
+// - The type in Content-Type can be one of:
+// application, audio, image, message, multipart, text, video, or a custom
+// type preceded by "X-" and followed by a token as defined below.
+// - The token is a string of at least one character, and not containing white
+// space, control characters, or any of the characters in the tspecials set.
+// - The tspecials set contains the characters ()<>@,;:\"/[]?.=
+// - The subtype in Content-Type is also a token.
+// - The optional parameter/s following the subtype are defined as:
+// token "=" (token / quoted-string)
+// - The quoted-string, as defined in RFC 822, is surrounded by double quotes
+// and can contain white space plus any character EXCEPT \, ", and CR.
+// It can also contain any single ASCII character as long as it is escaped by \.
+//
+// +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$`
+type CompressionMIMEType string
+
+// NodePlacement describes node scheduling configuration for an ingress
+// controller.
+type NodePlacement struct {
+ // nodeSelector is the node selector applied to ingress controller
+ // deployments.
+ //
+ // If set, the specified selector is used and replaces the default.
+ //
+ // If unset, the default depends on the value of the defaultPlacement
+ // field in the cluster config.openshift.io/v1/ingresses status.
+ //
+ // When defaultPlacement is Workers, the default is:
+ //
+ // kubernetes.io/os: linux
+ // node-role.kubernetes.io/worker: ''
+ //
+ // When defaultPlacement is ControlPlane, the default is:
+ //
+ // kubernetes.io/os: linux
+ // node-role.kubernetes.io/master: ''
+ //
+ // These defaults are subject to change.
+ //
+ // Note that using nodeSelector.matchExpressions is not supported. Only
+ // nodeSelector.matchLabels may be used. This is a limitation of the
+ // Kubernetes API: the pod spec does not allow complex expressions for
+ // node selectors.
+ //
+ // +optional
+ NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"`
+
+ // tolerations is a list of tolerations applied to ingress controller
+ // deployments.
+ //
+ // The default is an empty list.
+ //
+ // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ //
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// EndpointPublishingStrategyType is a way to publish ingress controller endpoints.
+// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService
+type EndpointPublishingStrategyType string
+
+const (
+ // LoadBalancerService publishes the ingress controller using a Kubernetes
+ // LoadBalancer Service.
+ LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService"
+
+ // HostNetwork publishes the ingress controller on node ports where the
+ // ingress controller is deployed.
+ HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork"
+
+ // Private does not publish the ingress controller.
+ PrivateStrategyType EndpointPublishingStrategyType = "Private"
+
+ // NodePortService publishes the ingress controller using a Kubernetes NodePort Service.
+ NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService"
+)
+
+// LoadBalancerScope is the scope at which a load balancer is exposed.
+// +kubebuilder:validation:Enum=Internal;External
+type LoadBalancerScope string
+
+var (
+ // InternalLoadBalancer is a load balancer that is exposed only on the
+ // cluster's private network.
+ InternalLoadBalancer LoadBalancerScope = "Internal"
+
+ // ExternalLoadBalancer is a load balancer that is exposed on the
+ // cluster's public network (which is typically on the Internet).
+ ExternalLoadBalancer LoadBalancerScope = "External"
+)
+
+// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8"
+// or "fd00::/8").
+// +kubebuilder:validation:Pattern=`(^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)`
+// + ---
+// + The regex for the IPv4 CIDR range was taken from other CIDR fields in the OpenShift API
+// + and the one for the IPv6 CIDR range was taken from
+// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
+// + The resulting regex is an OR of both regexes.
+type CIDR string
+
+// LoadBalancerStrategy holds parameters for a load balancer.
+type LoadBalancerStrategy struct {
+ // scope indicates the scope at which the load balancer is exposed.
+ // Possible values are "External" and "Internal".
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Scope LoadBalancerScope `json:"scope"`
+
+ // allowedSourceRanges specifies an allowlist of IP address ranges to which
+ // access to the load balancer should be restricted. Each range must be
+ // specified using CIDR notation (e.g. "10.0.0.0/8" or "fd00::/8"). If no range is
+ // specified, "0.0.0.0/0" for IPv4 and "::/0" for IPv6 are used by default,
+ // which allows all source addresses.
+ //
+ // To facilitate migration from earlier versions of OpenShift that did
+ // not have the allowedSourceRanges field, you may set the
+ // service.beta.kubernetes.io/load-balancer-source-ranges annotation on
+ // the "router-<ingresscontroller name>" service in the
+ // "openshift-ingress" namespace, and this annotation will take
+ // effect if allowedSourceRanges is empty on OpenShift 4.12.
+ //
+ // +nullable
+ // +optional
+ AllowedSourceRanges []CIDR `json:"allowedSourceRanges,omitempty"`
+
+ // providerParameters holds desired load balancer information specific to
+ // the underlying infrastructure provider.
+ //
+ // If empty, defaults will be applied. See specific providerParameters
+ // fields for details about their defaults.
+ //
+ // +optional
+ ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"`
+
+ // dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record
+ // associated with the load balancer service will be managed by
+ // the ingress operator. It defaults to Managed.
+ // Valid values are: Managed and Unmanaged.
+ //
+ // +kubebuilder:default:="Managed"
+ // +kubebuilder:validation:Required
+ // +default="Managed"
+ DNSManagementPolicy LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"`
+}
+
+// LoadBalancerDNSManagementPolicy is a policy for configuring how
+// ingresscontrollers manage DNS.
+//
+// +kubebuilder:validation:Enum=Managed;Unmanaged
+type LoadBalancerDNSManagementPolicy string
+
+const (
+ // ManagedLoadBalancerDNS specifies that the operator manages
+ // a wildcard DNS record for the ingresscontroller.
+ ManagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Managed"
+ // UnmanagedLoadBalancerDNS specifies that the operator does not manage
+ // any wildcard DNS record for the ingresscontroller.
+ UnmanagedLoadBalancerDNS LoadBalancerDNSManagementPolicy = "Unmanaged"
+)
+
+// ProviderLoadBalancerParameters holds desired load balancer information
+// specific to the underlying infrastructure provider.
+// +union
+type ProviderLoadBalancerParameters struct {
+ // type is the underlying infrastructure provider for the load balancer.
+ // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix",
+ // "OpenStack", and "VSphere".
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +required
+ Type LoadBalancerProviderType `json:"type"`
+
+ // aws provides configuration settings that are specific to AWS
+ // load balancers.
+ //
+ // If empty, defaults will be applied. See specific aws fields for
+ // details about their defaults.
+ //
+ // +optional
+ AWS *AWSLoadBalancerParameters `json:"aws,omitempty"`
+
+ // gcp provides configuration settings that are specific to GCP
+ // load balancers.
+ //
+ // If empty, defaults will be applied. See specific gcp fields for
+ // details about their defaults.
+ //
+ // +optional
+ GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"`
+
+ // ibm provides configuration settings that are specific to IBM Cloud
+ // load balancers.
+ //
+ // If empty, defaults will be applied. See specific ibm fields for
+ // details about their defaults.
+ //
+ // +optional
+ IBM *IBMLoadBalancerParameters `json:"ibm,omitempty"`
+}
+
+// LoadBalancerProviderType is the underlying infrastructure provider for the
+// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix",
+// "OpenStack", and "VSphere".
+//
+// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM
+type LoadBalancerProviderType string
+
+const (
+ AWSLoadBalancerProvider LoadBalancerProviderType = "AWS"
+ AzureLoadBalancerProvider LoadBalancerProviderType = "Azure"
+ GCPLoadBalancerProvider LoadBalancerProviderType = "GCP"
+ OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack"
+ VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere"
+ IBMLoadBalancerProvider LoadBalancerProviderType = "IBM"
+ BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal"
+ AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud"
+ NutanixLoadBalancerProvider LoadBalancerProviderType = "Nutanix"
+)
+
+// AWSLoadBalancerParameters provides configuration settings that are
+// specific to AWS load balancers.
+// +union
+type AWSLoadBalancerParameters struct {
+ // type is the type of AWS load balancer to instantiate for an ingresscontroller.
+ //
+ // Valid values are:
+ //
+ // * "Classic": A Classic Load Balancer that makes routing decisions at either
+ // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See
+ // the following for additional details:
+ //
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb
+ //
+ // * "NLB": A Network Load Balancer that makes routing decisions at the
+ // transport layer (TCP/SSL). See the following for additional details:
+ //
+ // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +required
+ Type AWSLoadBalancerType `json:"type"`
+
+ // classicLoadBalancerParameters holds configuration parameters for an AWS
+ // classic load balancer. Present only if type is Classic.
+ //
+ // +optional
+ ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"`
+
+ // networkLoadBalancerParameters holds configuration parameters for an AWS
+ // network load balancer. Present only if type is NLB.
+ //
+ // +optional
+ NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"`
+}
+
+// AWSLoadBalancerType is the type of AWS load balancer to instantiate.
+// +kubebuilder:validation:Enum=Classic;NLB
+type AWSLoadBalancerType string
+
+const (
+ AWSClassicLoadBalancer AWSLoadBalancerType = "Classic"
+ AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB"
+)
+
+// GCPLoadBalancerParameters provides configuration settings that are
+// specific to GCP load balancers.
+type GCPLoadBalancerParameters struct {
+ // clientAccess describes how client access is restricted for internal
+ // load balancers.
+ //
+ // Valid values are:
+ // * "Global": Specifying an internal load balancer with Global client access
+ // allows clients from any region within the VPC to communicate with the load
+ // balancer.
+ //
+ // https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access
+ //
+ // * "Local": Specifying an internal load balancer with Local client access
+ // means only clients within the same region (and VPC) as the GCP load balancer
+ // can communicate with the load balancer. Note that this is the default behavior.
+ //
+ // https://cloud.google.com/load-balancing/docs/internal#client_access
+ //
+ // +optional
+ ClientAccess GCPClientAccess `json:"clientAccess,omitempty"`
+}
+
+// GCPClientAccess describes how client access is restricted for internal
+// load balancers.
+// +kubebuilder:validation:Enum=Global;Local
+type GCPClientAccess string
+
+const (
+ GCPGlobalAccess GCPClientAccess = "Global"
+ GCPLocalAccess GCPClientAccess = "Local"
+)
+
+// IBMLoadBalancerParameters provides configuration settings that are
+// specific to IBM Cloud load balancers.
+type IBMLoadBalancerParameters struct {
+ // protocol specifies whether the load balancer uses PROXY protocol to forward connections to
+ // the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features:
+ // "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas"
+ //
+ // PROXY protocol can be used with load balancers that support it to
+ // communicate the source addresses of client connections when
+ // forwarding those connections to the IngressController. Using PROXY
+ // protocol enables the IngressController to report those source
+ // addresses instead of reporting the load balancer's address in HTTP
+ // headers and logs. Note that enabling PROXY protocol on the
+ // IngressController will cause connections to fail if you are not using
+ // a load balancer that uses PROXY protocol to forward connections to
+ // the IngressController. See
+ // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+ // information about PROXY protocol.
+ //
+ // Valid values for protocol are TCP, PROXY and omitted.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default is TCP, without the proxy protocol enabled.
+ //
+ // +optional
+ Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// AWSClassicLoadBalancerParameters holds configuration parameters for an
+// AWS Classic load balancer.
+type AWSClassicLoadBalancerParameters struct {
+ // connectionIdleTimeout specifies the maximum time period that a
+ // connection may be idle before the load balancer closes the
+ // connection. The value must be parseable as a time duration value
+ // (see the Go time package's ParseDuration format); a nil or zero value
+ // connection. The value must be parseable as a time duration value;
+ // see <https://pkg.go.dev/time#ParseDuration>. A nil or zero value
+ // means no opinion, in which case a default value is used. The default
+ // value for this field is 60s. This default is subject to change.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"`
+}
+
+// AWSNetworkLoadBalancerParameters holds configuration parameters for an
+// AWS Network load balancer.
+type AWSNetworkLoadBalancerParameters struct {
+}
+
+// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing
+// strategy.
+type HostNetworkStrategy struct {
+ // protocol specifies whether the IngressController expects incoming
+ // connections to use plain TCP or whether the IngressController expects
+ // PROXY protocol.
+ //
+ // PROXY protocol can be used with load balancers that support it to
+ // communicate the source addresses of client connections when
+ // forwarding those connections to the IngressController. Using PROXY
+ // protocol enables the IngressController to report those source
+ // addresses instead of reporting the load balancer's address in HTTP
+ // headers and logs. Note that enabling PROXY protocol on the
+ // IngressController will cause connections to fail if you are not using
+ // a load balancer that uses PROXY protocol to forward connections to
+ // the IngressController. See
+ // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+ // information about PROXY protocol.
+ //
+ // The following values are valid for this field:
+ //
+ // * The empty string.
+ // * "TCP".
+ // * "PROXY".
+ //
+ // The empty string specifies the default, which is TCP without PROXY
+ // protocol. Note that the default is subject to change.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+
+ // httpPort is the port on the host which should be used to listen for
+ // HTTP requests. This field should be set when port 80 is already in use.
+ // The value should not coincide with the NodePort range of the cluster.
+ // When the value is 0 or is not specified it defaults to 80.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:default=80
+ // +optional
+ HTTPPort int32 `json:"httpPort,omitempty"`
+
+ // httpsPort is the port on the host which should be used to listen for
+ // HTTPS requests. This field should be set when port 443 is already in use.
+ // The value should not coincide with the NodePort range of the cluster.
+ // When the value is 0 or is not specified it defaults to 443.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:default=443
+ // +optional
+ HTTPSPort int32 `json:"httpsPort,omitempty"`
+
+ // statsPort is the port on the host where the stats from the router are
+ // published. The value should not coincide with the NodePort range of the
+ // cluster. If an external load balancer is configured to forward connections
+ // to this IngressController, the load balancer should use this port for
+ // health checks. The load balancer can send HTTP probes on this port on a
+ // given node, with the path /healthz/ready to determine if the ingress
+ // controller is ready to receive traffic on the node. For proper operation
+ // the load balancer must not forward traffic to a node until the health
+ // check reports ready. The load balancer should also stop forwarding requests
+ // within a maximum of 45 seconds after /healthz/ready starts reporting
+ // not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with
+ // a threshold of two successful or failed requests to become healthy or
+ // unhealthy respectively, are well-tested values. When the value is 0 or
+ // is not specified it defaults to 1936.
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:default=1936
+ // +optional
+ StatsPort int32 `json:"statsPort,omitempty"`
+}
+
+// PrivateStrategy holds parameters for the Private endpoint publishing
+// strategy.
+type PrivateStrategy struct {
+ // protocol specifies whether the IngressController expects incoming
+ // connections to use plain TCP or whether the IngressController expects
+ // PROXY protocol.
+ //
+ // PROXY protocol can be used with load balancers that support it to
+ // communicate the source addresses of client connections when
+ // forwarding those connections to the IngressController. Using PROXY
+ // protocol enables the IngressController to report those source
+ // addresses instead of reporting the load balancer's address in HTTP
+ // headers and logs. Note that enabling PROXY protocol on the
+ // IngressController will cause connections to fail if you are not using
+ // a load balancer that uses PROXY protocol to forward connections to
+ // the IngressController. See
+ // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+ // information about PROXY protocol.
+ //
+ // The following values are valid for this field:
+ //
+ // * The empty string.
+ // * "TCP".
+ // * "PROXY".
+ //
+ // The empty string specifies the default, which is TCP without PROXY
+ // protocol. Note that the default is subject to change.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.
+type NodePortStrategy struct {
+ // protocol specifies whether the IngressController expects incoming
+ // connections to use plain TCP or whether the IngressController expects
+ // PROXY protocol.
+ //
+ // PROXY protocol can be used with load balancers that support it to
+ // communicate the source addresses of client connections when
+ // forwarding those connections to the IngressController. Using PROXY
+ // protocol enables the IngressController to report those source
+ // addresses instead of reporting the load balancer's address in HTTP
+ // headers and logs. Note that enabling PROXY protocol on the
+ // IngressController will cause connections to fail if you are not using
+ // a load balancer that uses PROXY protocol to forward connections to
+ // the IngressController. See
+ // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+ // information about PROXY protocol.
+ //
+ // The following values are valid for this field:
+ //
+ // * The empty string.
+ // * "TCP".
+ // * "PROXY".
+ //
+ // The empty string specifies the default, which is TCP without PROXY
+ // protocol. Note that the default is subject to change.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// IngressControllerProtocol specifies whether PROXY protocol is enabled or not.
+// +kubebuilder:validation:Enum="";TCP;PROXY
+type IngressControllerProtocol string
+
+const (
+ DefaultProtocol IngressControllerProtocol = ""
+ TCPProtocol IngressControllerProtocol = "TCP"
+ ProxyProtocol IngressControllerProtocol = "PROXY"
+)
+
+// EndpointPublishingStrategy is a way to publish the endpoints of an
+// IngressController, and represents the type and any additional configuration
+// for a specific type.
+// +union
+type EndpointPublishingStrategy struct {
+ // type is the publishing strategy to use. Valid values are:
+ //
+ // * LoadBalancerService
+ //
+ // Publishes the ingress controller using a Kubernetes LoadBalancer Service.
+ //
+ // In this configuration, the ingress controller deployment uses container
+ // networking. A LoadBalancer Service is created to publish the deployment.
+ //
+ // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+ //
+ // If domain is set, a wildcard DNS record will be managed to point at the
+ // LoadBalancer Service's external name. DNS records are managed only in DNS
+ // zones defined by dns.config.openshift.io/cluster .spec.publicZone and
+ // .spec.privateZone.
+ //
+ // Wildcard DNS management is currently supported only on the AWS, Azure,
+ // and GCP platforms.
+ //
+ // * HostNetwork
+ //
+ // Publishes the ingress controller on node ports where the ingress controller
+ // is deployed.
+ //
+ // In this configuration, the ingress controller deployment uses host
+ // networking, bound to node ports 80 and 443. The user is responsible for
+ // configuring an external load balancer to publish the ingress controller via
+ // the node ports.
+ //
+ // * Private
+ //
+ // Does not publish the ingress controller.
+ //
+ // In this configuration, the ingress controller deployment uses container
+ // networking, and is not explicitly published. The user must manually publish
+ // the ingress controller.
+ //
+ // * NodePortService
+ //
+ // Publishes the ingress controller using a Kubernetes NodePort Service.
+ //
+ // In this configuration, the ingress controller deployment uses container
+ // networking. A NodePort Service is created to publish the deployment. The
+ // specific node ports are dynamically allocated by OpenShift; however, to
+ // support static port allocations, user changes to the node port
+ // field of the managed NodePort Service will be preserved.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +required
+ Type EndpointPublishingStrategyType `json:"type"`
+
+ // loadBalancer holds parameters for the load balancer. Present only if
+ // type is LoadBalancerService.
+ // +optional
+ LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"`
+
+ // hostNetwork holds parameters for the HostNetwork endpoint publishing
+ // strategy. Present only if type is HostNetwork.
+ // +optional
+ HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"`
+
+ // private holds parameters for the Private endpoint publishing
+ // strategy. Present only if type is Private.
+ // +optional
+ Private *PrivateStrategy `json:"private,omitempty"`
+
+ // nodePort holds parameters for the NodePortService endpoint publishing strategy.
+ // Present only if type is NodePortService.
+ // +optional
+ NodePort *NodePortStrategy `json:"nodePort,omitempty"`
+}
+
+// ClientCertificatePolicy describes the policy for client certificates.
+// +kubebuilder:validation:Enum="";Required;Optional
+type ClientCertificatePolicy string
+
+const (
+ // ClientCertificatePolicyRequired indicates that a client certificate
+ // should be required.
+ ClientCertificatePolicyRequired ClientCertificatePolicy = "Required"
+
+ // ClientCertificatePolicyOptional indicates that a client certificate
+ // should be requested but not required.
+ ClientCertificatePolicyOptional ClientCertificatePolicy = "Optional"
+)
+
+// ClientTLS specifies TLS configuration to enable client-to-server
+// authentication, which can be used for mutual TLS.
+type ClientTLS struct {
+ // clientCertificatePolicy specifies whether the ingress controller
+ // requires clients to provide certificates. This field accepts the
+ // values "Required" or "Optional".
+ //
+ // Note that the ingress controller only checks client certificates for
+ // edge-terminated and reencrypt TLS routes; it cannot check
+ // certificates for cleartext HTTP or passthrough TLS routes.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"`
+
+ // clientCA specifies a configmap containing the PEM-encoded CA
+ // certificate bundle that should be used to verify a client's
+ // certificate. The administrator must create this configmap in the
+ // openshift-config namespace.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ ClientCA configv1.ConfigMapNameReference `json:"clientCA"`
+
+ // allowedSubjectPatterns specifies a list of regular expressions that
+ // should be matched against the distinguished name on a valid client
+ // certificate to filter requests. The regular expressions must use
+ // PCRE syntax. If this list is empty, no filtering is performed. If
+ // the list is nonempty, then at least one pattern must match a client
+ // certificate's distinguished name or else the ingress controller
+ // rejects the certificate and denies the connection.
+ //
+ // +listType=atomic
+ // +optional
+ AllowedSubjectPatterns []string `json:"allowedSubjectPatterns,omitempty"`
+}
+
+// RouteAdmissionPolicy is an admission policy for allowing new route claims.
+type RouteAdmissionPolicy struct {
+ // namespaceOwnership describes how host name claims across namespaces should
+ // be handled.
+ //
+ // Value must be one of:
+ //
+ // - Strict: Do not allow routes in different namespaces to claim the same host.
+ //
+ // - InterNamespaceAllowed: Allow routes to claim different paths of the same
+ // host name across namespaces.
+ //
+ // If empty, the default is Strict.
+ // +optional
+ NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"`
+ // wildcardPolicy describes how routes with wildcard policies should
+ // be handled for the ingress controller. WildcardPolicy controls use
+ // of routes [1] exposed by the ingress controller based on the route's
+ // wildcard policy.
+ //
+ // [1] https://github.com/openshift/api/blob/master/route/v1/types.go
+ //
+ // Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed
+ // will cause admitted routes with a wildcard policy of Subdomain to stop
+ // working. These routes must be updated to a wildcard policy of None to be
+ // readmitted by the ingress controller.
+ //
+ // WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.
+ //
+ // If empty, defaults to "WildcardsDisallowed".
+ //
+ WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"`
+}
+
+// WildcardPolicy is a route admission policy component that describes how
+// routes with a wildcard policy should be handled.
+// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed
+type WildcardPolicy string
+
+const (
+ // WildcardPolicyAllowed indicates routes with any wildcard policy are
+ // admitted by the ingress controller.
+ WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed"
+
+ // WildcardPolicyDisallowed indicates only routes with a wildcard policy
+ // of None are admitted by the ingress controller.
+ WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed"
+)
+
+// NamespaceOwnershipCheck is a route admission policy component that describes
+// how host name claims across namespaces should be handled.
+// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict
+type NamespaceOwnershipCheck string
+
+const (
+ // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces.
+ InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed"
+
+ // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces.
+ StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict"
+)
+
+// LoggingDestinationType is a type of destination to which to send log
+// messages.
+//
+// +kubebuilder:validation:Enum=Container;Syslog
+type LoggingDestinationType string
+
+const (
+ // Container sends log messages to a sidecar container.
+ ContainerLoggingDestinationType LoggingDestinationType = "Container"
+
+ // Syslog sends log messages to a syslog endpoint.
+ SyslogLoggingDestinationType LoggingDestinationType = "Syslog"
+
+ // ContainerLoggingSidecarContainerName is the name of the container
+ // with the log output in an ingress controller pod when container
+ // logging is used.
+ ContainerLoggingSidecarContainerName = "logs"
+)
+
+// SyslogLoggingDestinationParameters describes parameters for the Syslog
+// logging destination type.
+type SyslogLoggingDestinationParameters struct {
+ // address is the IP address of the syslog endpoint that receives log
+ // messages.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Address string `json:"address"`
+
+ // port is the UDP port number of the syslog endpoint that receives log
+ // messages.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +required
+ Port uint32 `json:"port"`
+
+ // facility specifies the syslog facility of log messages.
+ //
+ // If this field is empty, the facility is "local1".
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7
+ // +optional
+ Facility string `json:"facility,omitempty"`
+
+ // maxLength is the maximum length of the log message.
+ //
+ // Valid values are integers in the range 480 to 4096, inclusive.
+ //
+ // When omitted, the default value is 1024.
+ //
+ // +kubebuilder:validation:Maximum=4096
+ // +kubebuilder:validation:Minimum=480
+ // +kubebuilder:default=1024
+ // +default:=1024
+ // +optional
+ MaxLength uint32 `json:"maxLength,omitempty"`
+}
+
+// ContainerLoggingDestinationParameters describes parameters for the Container
+// logging destination type.
+type ContainerLoggingDestinationParameters struct {
+ // maxLength is the maximum length of the log message.
+ //
+ // Valid values are integers in the range 480 to 8192, inclusive.
+ //
+ // When omitted, the default value is 1024.
+ //
+ // +kubebuilder:validation:Maximum=8192
+ // +kubebuilder:validation:Minimum=480
+ // +kubebuilder:default=1024
+ // +default:=1024
+ // +optional
+ MaxLength int32 `json:"maxLength,omitempty"`
+}
+
+// LoggingDestination describes a destination for log messages.
+// +union
+type LoggingDestination struct {
+ // type is the type of destination for logs. It must be one of the
+ // following:
+ //
+ // * Container
+ //
+ // The ingress operator configures the sidecar container named "logs" on
+ // the ingress controller pod and configures the ingress controller to
+ // write logs to the sidecar. The logs are then available as container
+ // logs. The expectation is that the administrator configures a custom
+ // logging solution that reads logs from this sidecar. Note that using
+ // container logs means that logs may be dropped if the rate of logs
+ // exceeds the container runtime's or the custom logging solution's
+ // capacity.
+ //
+ // * Syslog
+ //
+ // Logs are sent to a syslog endpoint. The administrator must specify
+ // an endpoint that can receive syslog messages. The expectation is
+ // that the administrator has configured a custom syslog instance.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +required
+ Type LoggingDestinationType `json:"type"`
+
+ // syslog holds parameters for a syslog endpoint. Present only if
+ // type is Syslog.
+ //
+ // +optional
+ Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"`
+
+ // container holds parameters for the Container logging destination.
+ // Present only if type is Container.
+ //
+ // +optional
+ Container *ContainerLoggingDestinationParameters `json:"container,omitempty"`
+}
+
+// IngressControllerCaptureHTTPHeader describes an HTTP header that should be
+// captured.
+type IngressControllerCaptureHTTPHeader struct {
+ // name specifies a header name. Its value must be a valid HTTP header
+ // name as defined in RFC 2616 section 4.2.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$"
+ // +required
+ Name string `json:"name"`
+
+ // maxLength specifies a maximum length for the header value. If a
+ // header value exceeds this length, the value will be truncated in the
+ // log message. Note that the ingress controller may impose a separate
+ // bound on the total length of HTTP headers in a request.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +required
+ MaxLength int `json:"maxLength"`
+}
+
+// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the
+// IngressController captures.
+type IngressControllerCaptureHTTPHeaders struct {
+ // request specifies which HTTP request headers to capture.
+ //
+ // If this field is empty, no request headers are captured.
+ //
+ // +nullable
+ // +optional
+ Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"`
+
+ // response specifies which HTTP response headers to capture.
+ //
+ // If this field is empty, no response headers are captured.
+ //
+ // +nullable
+ // +optional
+ Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"`
+}
+
+// CookieMatchType indicates the type of matching used against cookie names to
+// select a cookie for capture.
+// +kubebuilder:validation:Enum=Exact;Prefix
+type CookieMatchType string
+
+const (
+ // CookieMatchTypeExact indicates that an exact string match should be
+ // performed.
+ CookieMatchTypeExact CookieMatchType = "Exact"
+ // CookieMatchTypePrefix indicates that a string prefix match should be
+ // performed.
+ CookieMatchTypePrefix CookieMatchType = "Prefix"
+)
+
+// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be
+// captured.
+type IngressControllerCaptureHTTPCookie struct {
+ IngressControllerCaptureHTTPCookieUnion `json:",inline"`
+
+ // maxLength specifies a maximum length of the string that will be
+ // logged, which includes the cookie name, cookie value, and
+ // one-character delimiter. If the log entry exceeds this length, the
+ // value will be truncated in the log message. Note that the ingress
+ // controller may impose a separate bound on the total length of HTTP
+ // headers in a request.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=1024
+ // +required
+ MaxLength int `json:"maxLength"`
+}
+
+// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.
+// +union
+type IngressControllerCaptureHTTPCookieUnion struct {
+ // matchType specifies the type of match to be performed on the cookie
+ // name. Allowed values are "Exact" for an exact string match and
+ // "Prefix" for a string prefix match. If "Exact" is specified, a name
+ // must be specified in the name field. If "Prefix" is provided, a
+ // prefix must be specified in the namePrefix field. For example,
+ // specifying matchType "Prefix" and namePrefix "foo" will capture a
+ // cookie named "foo" or "foobar" but not one named "bar". The first
+ // matching cookie is captured.
+ //
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ // +required
+ MatchType CookieMatchType `json:"matchType,omitempty"`
+
+ // name specifies a cookie name. Its value must be a valid HTTP cookie
+ // name as defined in RFC 6265 section 4.1.
+ //
+ // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$"
+ // +kubebuilder:validation:MinLength=0
+ // +kubebuilder:validation:MaxLength=1024
+ // +optional
+ Name string `json:"name"`
+
+ // namePrefix specifies a cookie name prefix. Its value must be a valid
+ // HTTP cookie name as defined in RFC 6265 section 4.1.
+ //
+ // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$"
+ // +kubebuilder:validation:MinLength=0
+ // +kubebuilder:validation:MaxLength=1024
+ // +optional
+ NamePrefix string `json:"namePrefix"`
+}
+
+// LoggingPolicy indicates how an event should be logged.
+// +kubebuilder:validation:Enum=Log;Ignore
+type LoggingPolicy string
+
+const (
+ // LoggingPolicyLog indicates that an event should be logged.
+ LoggingPolicyLog LoggingPolicy = "Log"
+ // LoggingPolicyIgnore indicates that an event should not be logged.
+ LoggingPolicyIgnore LoggingPolicy = "Ignore"
+)
+
+// AccessLogging describes how client requests should be logged.
+type AccessLogging struct {
+ // destination is where access logs go.
+ //
+ // +kubebuilder:validation:Required
+ // +required
+ Destination LoggingDestination `json:"destination"`
+
+ // httpLogFormat specifies the format of the log message for an HTTP
+ // request.
+ //
+ // If this field is empty, log messages use the implementation's default
+ // HTTP log format. For HAProxy's default HTTP log format, see the
+ // HAProxy documentation:
+ // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3
+ //
+ // Note that this format only applies to cleartext HTTP connections
+ // and to secure HTTP connections for which the ingress controller
+ // terminates encryption (that is, edge-terminated or reencrypt
+ // connections). It does not affect the log format for TLS passthrough
+ // connections.
+ //
+ // +optional
+ HttpLogFormat string `json:"httpLogFormat,omitempty"`
+
+ // httpCaptureHeaders defines HTTP headers that should be captured in
+ // access logs. If this field is empty, no headers are captured.
+ //
+ // Note that this option only applies to cleartext HTTP connections
+ // and to secure HTTP connections for which the ingress controller
+ // terminates encryption (that is, edge-terminated or reencrypt
+ // connections). Headers cannot be captured for TLS passthrough
+ // connections.
+ //
+ // +optional
+ HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"`
+
+ // httpCaptureCookies specifies HTTP cookies that should be captured in
+ // access logs. If this field is empty, no cookies are captured.
+ //
+ // +nullable
+ // +optional
+ // +kubebuilder:validation:MaxItems=1
+ HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"`
+
+ // logEmptyRequests specifies how connections on which no request is
+ // received should be logged. Typically, these empty requests come from
+ // load balancers' health probes or Web browsers' speculative
+ // connections ("preconnect"), in which case logging these requests may
+ // be undesirable. However, these requests may also be caused by
+ // network errors, in which case logging empty requests may be useful
+ // for diagnosing the errors. In addition, these requests may be caused
+ // by port scans, in which case logging empty requests may aid in
+ // detecting intrusion attempts. Allowed values for this field are
+ // "Log" and "Ignore". The default value is "Log".
+ //
+ // +optional
+ // +kubebuilder:default:="Log"
+ LogEmptyRequests LoggingPolicy `json:"logEmptyRequests,omitempty"`
+}
+
+// IngressControllerLogging describes what should be logged where.
+type IngressControllerLogging struct {
+ // access describes how the client requests should be logged.
+ //
+ // If this field is empty, access logging is disabled.
+ //
+ // +optional
+ Access *AccessLogging `json:"access,omitempty"`
+}
+
+// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers.
+//
+// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never
+type IngressControllerHTTPHeaderPolicy string
+
+const (
+ // AppendHTTPHeaderPolicy appends the header, preserving any existing header.
+ AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append"
+ // ReplaceHTTPHeaderPolicy sets the header, removing any existing header.
+ ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace"
+ // IfNoneHTTPHeaderPolicy sets the header if it is not already set.
+ IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone"
+ // NeverHTTPHeaderPolicy never sets the header, preserving any existing
+ // header.
+ NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never"
+)
+
+// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a
+// unique id header.
+type IngressControllerHTTPUniqueIdHeaderPolicy struct {
+ // name specifies the name of the HTTP header (for example, "unique-id")
+ // that the ingress controller should inject into HTTP requests. The
+ // field's value must be a valid HTTP header name as defined in RFC 2616
+ // section 4.2. If the field is empty, no header is injected.
+ //
+ // +optional
+ // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$"
+ // +kubebuilder:validation:MinLength=0
+ // +kubebuilder:validation:MaxLength=1024
+ Name string `json:"name,omitempty"`
+
+ // format specifies the format for the injected HTTP header's value.
+ // This field has no effect unless name is specified. For the
+ // HAProxy-based ingress controller implementation, this format uses the
+ // same syntax as the HTTP log format. If the field is empty, the
+ // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the
+ // corresponding HAProxy documentation:
+ // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3
+ //
+ // +optional
+ // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$"
+ // +kubebuilder:validation:MinLength=0
+ // +kubebuilder:validation:MaxLength=1024
+ Format string `json:"format,omitempty"`
+}
+
+// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header
+// (for example, "X-Forwarded-For") in the desired capitalization. The value
+// must be a valid HTTP header name as defined in RFC 2616 section 4.2.
+//
+// +optional
+// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$"
+// +kubebuilder:validation:MinLength=0
+// +kubebuilder:validation:MaxLength=1024
+type IngressControllerHTTPHeaderNameCaseAdjustment string
+
+// IngressControllerHTTPHeaders specifies how the IngressController handles
+// certain HTTP headers.
+type IngressControllerHTTPHeaders struct {
+ // forwardedHeaderPolicy specifies when and how the IngressController
+ // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host,
+ // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version
+ // HTTP headers. The value may be one of the following:
+ //
+ // * "Append", which specifies that the IngressController appends the
+ // headers, preserving existing headers.
+ //
+ // * "Replace", which specifies that the IngressController sets the
+ // headers, replacing any existing Forwarded or X-Forwarded-* headers.
+ //
+ // * "IfNone", which specifies that the IngressController sets the
+ // headers if they are not already set.
+ //
+ // * "Never", which specifies that the IngressController never sets the
+ // headers, preserving any existing headers.
+ //
+ // By default, the policy is "Append".
+ //
+ // +optional
+ ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"`
+
+ // uniqueId describes configuration for a custom HTTP header that the
+ // ingress controller should inject into incoming HTTP requests.
+ // Typically, this header is configured to have a value that is unique
+ // to the HTTP request. The header can be used by applications or
+ // included in access logs to facilitate tracing individual HTTP
+ // requests.
+ //
+ // If this field is empty, no such header is injected into requests.
+ //
+ // +optional
+ UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"`
+
+ // headerNameCaseAdjustments specifies case adjustments that can be
+ // applied to HTTP header names. Each adjustment is specified as an
+ // HTTP header name with the desired capitalization. For example,
+ // specifying "X-Forwarded-For" indicates that the "x-forwarded-for"
+ // HTTP header should be adjusted to have the specified capitalization.
+ //
+ // These adjustments are only applied to cleartext, edge-terminated, and
+ // re-encrypt routes, and only when using HTTP/1.
+ //
+ // For request headers, these adjustments are applied only for routes
+ // that have the haproxy.router.openshift.io/h1-adjust-case=true
+ // annotation. For response headers, these adjustments are applied to
+ // all HTTP responses.
+ //
+ // If this field is empty, no request headers are adjusted.
+ //
+ // +nullable
+ // +optional
+ HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"`
+
+ // actions specifies options for modifying headers and their values.
+ // Note that this option only applies to cleartext HTTP connections
+ // and to secure HTTP connections for which the ingress controller
+ // terminates encryption (that is, edge-terminated or reencrypt
+ // connections). Headers cannot be modified for TLS passthrough
+ // connections.
+ // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security`
+ // may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in
+ // accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies.
+ // Any actions defined here are applied after any actions related to the following other fields:
+ // cache-control, spec.clientTLS,
+ // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId,
+ // and spec.httpHeaders.headerNameCaseAdjustments.
+ // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after
+ // the actions specified in the IngressController's spec.httpHeaders.actions field.
+ // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be
+ // executed after the actions specified in the Route's spec.httpHeaders.actions field.
+ // Headers set using this API cannot be captured for use in access logs.
+ // The following header names are reserved and may not be modified via this API:
+ // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie.
+ // Note that the total size of all net added headers *after* interpolating dynamic values
+ // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+ // IngressController. Please refer to the documentation
+ // for that API field for more details.
+ // +optional
+ Actions IngressControllerHTTPHeaderActions `json:"actions,omitempty"`
+}
+
+// IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.
+type IngressControllerHTTPHeaderActions struct {
+ // response is a list of HTTP response headers to modify.
+ // Actions defined here will modify the response headers of all requests passing through an ingress controller.
+ // These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster.
+ // IngressController actions for response headers will be executed after Route actions.
+ // Currently, actions may define to either `Set` or `Delete` headers values.
+ // Actions are applied in sequence as defined in this list.
+ // A maximum of 20 response header actions may be configured.
+ // Sample fetchers allowed are "res.hdr" and "ssl_c_der".
+ // Converters allowed are "lower" and "base64".
+ // Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ // +kubebuilder:validation:MaxItems=20
+ // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64."
+ Response []IngressControllerHTTPHeader `json:"response"`
+ // request is a list of HTTP request headers to modify.
+ // Actions defined here will modify the request headers of all requests passing through an ingress controller.
+ // These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster.
+ // IngressController actions for request headers will be executed before Route actions.
+ // Currently, actions may define to either `Set` or `Delete` headers values.
+ // Actions are applied in sequence as defined in this list.
+ // A maximum of 20 request header actions may be configured.
+ // Sample fetchers allowed are "req.hdr" and "ssl_c_der".
+ // Converters allowed are "lower" and "base64".
+ // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+ // + ---
+ // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa.
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ // +kubebuilder:validation:MaxItems=20
+ // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64."
+ Request []IngressControllerHTTPHeader `json:"request"`
+}
+
+// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header.
+type IngressControllerHTTPHeader struct {
+ // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header
+ // name as defined in RFC 2616 section 4.2.
+ // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`".
+ // The following header names are reserved and may not be modified via this API:
+ // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie.
+ // It must be no more than 255 characters in length.
+ // Header name must be unique.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=255
+ // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$"
+ // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions"
+ // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions"
+ // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'host'",message="host header may not be modified via header actions"
+ // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions"
+ // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions"
+ Name string `json:"name"`
+ // action specifies actions to perform on headers, such as setting or deleting headers.
+ // +kubebuilder:validation:Required
+ Action IngressControllerHTTPHeaderActionUnion `json:"action"`
+}
+
+// IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise"
+// +union
+type IngressControllerHTTPHeaderActionUnion struct {
+ // type defines the type of the action to be applied on the header.
+ // Possible values are Set or Delete.
+ // Set allows you to set HTTP request and response headers.
+ // Delete allows you to delete HTTP request and response headers.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Enum:=Set;Delete
+ // +kubebuilder:validation:Required
+ Type IngressControllerHTTPHeaderActionType `json:"type"`
+
+ // set specifies how the HTTP header should be set.
+ // This field is required when type is Set and forbidden otherwise.
+ // +optional
+ // +unionMember
+ Set *IngressControllerSetHTTPHeader `json:"set,omitempty"`
+}
+
+// IngressControllerHTTPHeaderActionType defines actions that can be performed on HTTP headers.
+type IngressControllerHTTPHeaderActionType string
+
+const (
+ // Set specifies that an HTTP header should be set.
+ Set IngressControllerHTTPHeaderActionType = "Set"
+ // Delete specifies that an HTTP header should be deleted.
+ Delete IngressControllerHTTPHeaderActionType = "Delete"
+)
+
+// IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.
+type IngressControllerSetHTTPHeader struct {
+ // value specifies a header value.
+ // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in
+ // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and
+ // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.
+ // The value of this field must be no more than 16384 characters in length.
+ // Note that the total size of all net added headers *after* interpolating dynamic values
+ // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+ // IngressController.
+ // + ---
+ // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit.
+ // + See <https://www.geekersdigest.com/max-http-request-header-size-server-comparison/>.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=16384
+ Value string `json:"value"`
+}
+
+// IngressControllerTuningOptions specifies options for tuning the performance
+// of ingress controller pods
+type IngressControllerTuningOptions struct {
+ // headerBufferBytes describes how much memory should be reserved
+ // (in bytes) for IngressController connection sessions.
+ // Note that this value must be at least 16384 if HTTP/2 is
+ // enabled for the IngressController (https://tools.ietf.org/html/rfc7540).
+ // If this field is empty, the IngressController will use a default value
+ // of 32768 bytes.
+ //
+ // Setting this field is generally not recommended as headerBufferBytes
+ // values that are too small may break the IngressController and
+ // headerBufferBytes values that are too large could cause the
+ // IngressController to use significantly more memory than necessary.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=16384
+ // +optional
+ HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"`
+
+ // headerBufferMaxRewriteBytes describes how much memory should be reserved
+ // (in bytes) from headerBufferBytes for HTTP header rewriting
+ // and appending for IngressController connection sessions.
+ // Note that incoming HTTP requests will be limited to
+ // (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning
+ // headerBufferBytes must be greater than headerBufferMaxRewriteBytes.
+ // If this field is empty, the IngressController will use a default value
+ // of 8192 bytes.
+ //
+ // Setting this field is generally not recommended as
+ // headerBufferMaxRewriteBytes values that are too small may break the
+ // IngressController and headerBufferMaxRewriteBytes values that are too
+ // large could cause the IngressController to use significantly more memory
+ // than necessary.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=4096
+ // +optional
+ HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"`
+
+ // threadCount defines the number of threads created per HAProxy process.
+ // Creating more threads allows each ingress controller pod to handle more
+ // connections, at the cost of more system resources being used. HAProxy
+ // currently supports up to 64 threads. If this field is empty, the
+ // IngressController will use the default value. The current default is 4
+ // threads, but this may change in future releases.
+ //
+ // Setting this field is generally not recommended. Increasing the number
+ // of HAProxy threads allows ingress controller pods to utilize more CPU
+ // time under load, potentially starving other pods if set too high.
+ // Reducing the number of threads may cause the ingress controller to
+ // perform poorly.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=64
+ // +optional
+ ThreadCount int32 `json:"threadCount,omitempty"`
+
+ // clientTimeout defines how long a connection will be held open while
+ // waiting for a client response.
+ //
+ // If unset, the default timeout is 30s
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"`
+
+ // clientFinTimeout defines how long a connection will be held open while
+ // waiting for the client response to the server/backend closing the
+ // connection.
+ //
+ // If unset, the default timeout is 1s
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"`
+
+ // serverTimeout defines how long a connection will be held open while
+ // waiting for a server/backend response.
+ //
+ // If unset, the default timeout is 30s
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"`
+
+ // serverFinTimeout defines how long a connection will be held open while
+ // waiting for the server/backend response to the client closing the
+ // connection.
+ //
+ // If unset, the default timeout is 1s
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"`
+
+ // tunnelTimeout defines how long a tunnel connection (including
+ // websockets) will be held open while the tunnel is idle.
+ //
+ // If unset, the default timeout is 1h
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"`
+
+ // ConnectTimeout defines the maximum time to wait for
+ // a connection attempt to a server/backend to succeed.
+ //
+ // This field expects an unsigned duration string of decimal numbers, each with optional
+ // fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m".
+ // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h".
+ //
+ // When omitted, this means the user has no opinion and the platform is left
+ // to choose a reasonable default. This default is subject to change over time.
+ // The current default is 5s.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ ConnectTimeout *metav1.Duration `json:"connectTimeout,omitempty"`
+
+ // tlsInspectDelay defines how long the router can hold data to find a
+ // matching route.
+ //
+ // Setting this too short can cause the router to fall back to the default
+ // certificate for edge-terminated or reencrypt routes even when a better
+ // matching certificate could be used.
+ //
+ // If unset, the default inspect delay is 5s
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Format=duration
+ // +optional
+ TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"`
+
+ // healthCheckInterval defines how long the router waits between two consecutive
+ // health checks on its configured backends. This value is applied globally as
+ // a default for all routes, but may be overridden per-route by the route annotation
+ // "router.openshift.io/haproxy.health.check.interval".
+ //
+ // Expects an unsigned duration string of decimal numbers, each with optional
+ // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m".
+ // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h".
+ //
+ // Setting this to less than 5s can cause excess traffic due to too frequent
+ // TCP health checks and accompanying SYN packet storms. Alternatively, setting
+ // this too high can result in increased latency, due to backend servers that are no
+ // longer available, but haven't yet been detected as such.
+ //
+ // An empty or zero healthCheckInterval means no opinion and IngressController chooses
+ // a default, which is subject to change over time.
+ // Currently the default healthCheckInterval value is 5s.
+ //
+ // Currently the minimum allowed value is 1s and the maximum allowed value is
+ // 2147483647ms (24.85 days). Both are subject to change over time.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ HealthCheckInterval *metav1.Duration `json:"healthCheckInterval,omitempty"`
+
+ // maxConnections defines the maximum number of simultaneous
+ // connections that can be established per HAProxy process.
+ // Increasing this value allows each ingress controller pod to
+ // handle more connections but at the cost of additional
+ // system resources being consumed.
+ //
+ // Permitted values are: empty, 0, -1, and the range
+ // 2000-2000000.
+ //
+ // If this field is empty or 0, the IngressController will use
+ // the default value of 50000, but the default is subject to
+ // change in future releases.
+ //
+ // If the value is -1 then HAProxy will dynamically compute a
+ // maximum value based on the available ulimits in the running
+ // container. Selecting -1 (i.e., auto) will result in a large
+ // value being computed (~520000 on OpenShift >=4.10 clusters)
+ // and therefore each HAProxy process will incur significant
+ // memory usage compared to the current default of 50000.
+ //
+ // Setting a value that is greater than the current operating
+ // system limit will prevent the HAProxy process from
+ // starting.
+ //
+ // If you choose a discrete value (e.g., 750000) and the
+ // router pod is migrated to a new node, there's no guarantee
+ // that that new node has identical ulimits configured. In
+ // such a scenario the pod would fail to start. If you have
+ // nodes with different ulimits configured (e.g., different
+ // tuned profiles) and you choose a discrete value then the
+ // guidance is to use -1 and let the value be computed
+ // dynamically at runtime.
+ //
+ // You can monitor memory usage for router containers with the
+ // following metric:
+ // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}'.
+ //
+ // You can monitor memory usage of individual HAProxy
+ // processes in router containers with the following metric:
+ // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'.
+ //
+ // +kubebuilder:validation:Optional
+ // +optional
+ MaxConnections int32 `json:"maxConnections,omitempty"`
+
+ // reloadInterval defines the minimum interval at which the router is allowed to reload
+ // to accept new changes. Increasing this value can prevent the accumulation of
+ // HAProxy processes, depending on the scenario. Increasing this interval can
+ // also lessen load imbalance on a backend's servers when using the roundrobin
+ // balancing algorithm. Alternatively, decreasing this value may decrease latency
+ // since updates to HAProxy's configuration can take effect more quickly.
+ //
+ // The value must be a time duration value; see <https://pkg.go.dev/time#Duration>.
+ // Currently, the minimum value allowed is 1s, and the maximum allowed value is
+ // 120s. Minimum and maximum allowed values may change in future versions of OpenShift.
+ // Note that if a duration outside of these bounds is provided, the value of reloadInterval
+ // will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to
+ // 120s; the IngressController will not reject and replace this disallowed value with
+ // the default).
+ //
+ // A zero value for reloadInterval tells the IngressController to choose the default,
+ // which is currently 5s and subject to change without notice.
+ //
+ // This field expects an unsigned duration string of decimal numbers, each with optional
+ // fraction and a unit suffix, e.g. "300ms", "1.5h" or "2h45m".
+ // Valid time units are "ns", "us" (or "µs" U+00B5 or "μs" U+03BC), "ms", "s", "m", "h".
+ //
+ // Note: Setting a value significantly larger than the default of 5s can cause latency
+ // in observing updates to routes and their endpoints. HAProxy's configuration will
+ // be reloaded less frequently, and newly created routes will not be served until the
+ // subsequent reload.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$
+ // +kubebuilder:validation:Type:=string
+ // +optional
+ ReloadInterval metav1.Duration `json:"reloadInterval,omitempty"`
+}
+
+// HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request
+// is received should be handled.
+// +kubebuilder:validation:Enum=Respond;Ignore
+type HTTPEmptyRequestsPolicy string
+
+const (
+ // HTTPEmptyRequestsPolicyRespond indicates that the ingress controller
+ // should respond to empty requests.
+ HTTPEmptyRequestsPolicyRespond HTTPEmptyRequestsPolicy = "Respond"
+ // HTTPEmptyRequestsPolicyIgnore indicates that the ingress controller
+ // should ignore empty requests.
+ HTTPEmptyRequestsPolicyIgnore HTTPEmptyRequestsPolicy = "Ignore"
+)
+
+var (
+ // Available indicates the ingress controller deployment is available.
+ IngressControllerAvailableConditionType = "Available"
+ // LoadBalancerManaged indicates the management status of any load balancer
+ // service associated with an ingress controller.
+ LoadBalancerManagedIngressConditionType = "LoadBalancerManaged"
+ // LoadBalancerReady indicates the ready state of any load balancer service
+ // associated with an ingress controller.
+ LoadBalancerReadyIngressConditionType = "LoadBalancerReady"
+ // DNSManaged indicates the management status of any DNS records for the
+ // ingress controller.
+ DNSManagedIngressConditionType = "DNSManaged"
+ // DNSReady indicates the ready state of any DNS records for the ingress
+ // controller.
+ DNSReadyIngressConditionType = "DNSReady"
+)
+
+// IngressControllerStatus defines the observed status of the IngressController.
+type IngressControllerStatus struct {
+ // availableReplicas is number of observed available replicas according to the
+ // ingress controller deployment.
+ AvailableReplicas int32 `json:"availableReplicas"`
+
+ // selector is a label selector, in string format, for ingress controller pods
+ // corresponding to the IngressController. The number of matching pods should
+ // equal the value of availableReplicas.
+ Selector string `json:"selector"`
+
+ // domain is the actual domain in use.
+ Domain string `json:"domain"`
+
+ // endpointPublishingStrategy is the actual strategy in use.
+ EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
+
+ // conditions is a list of conditions and their status.
+ //
+ // Available means the ingress controller deployment is available and
+ // servicing route and ingress resources (i.e, .status.availableReplicas
+ // equals .spec.replicas)
+ //
+ // There are additional conditions which indicate the status of other
+ // ingress controller features and capabilities.
+ //
+ // * LoadBalancerManaged
+ // - True if the following conditions are met:
+ // * The endpoint publishing strategy requires a service load balancer.
+ // - False if any of those conditions are unsatisfied.
+ //
+ // * LoadBalancerReady
+ // - True if the following conditions are met:
+ // * A load balancer is managed.
+ // * The load balancer is ready.
+ // - False if any of those conditions are unsatisfied.
+ //
+ // * DNSManaged
+ // - True if the following conditions are met:
+ // * The endpoint publishing strategy and platform support DNS.
+ // * The ingress controller domain is set.
+ // * dns.config.openshift.io/cluster configures DNS zones.
+ // - False if any of those conditions are unsatisfied.
+ //
+ // * DNSReady
+ // - True if the following conditions are met:
+ // * DNS is managed.
+ // * DNS records have been successfully created.
+ // - False if any of those conditions are unsatisfied.
+ Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+ // tlsProfile is the TLS connection configuration that is in effect.
+ // +optional
+ TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"`
+
+ // observedGeneration is the most recent generation observed.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // namespaceSelector is the actual namespaceSelector in use.
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // routeSelector is the actual routeSelector in use.
+ // +optional
+ RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressControllerList contains a list of IngressControllers.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IngressControllerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []IngressController `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_insights.go b/vendor/github.com/openshift/api/operator/v1/types_insights.go
new file mode 100644
index 0000000000..56e2b51c14
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_insights.go
@@ -0,0 +1,156 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=insightsoperators,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1237
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=insights,operatorOrdering=00
+//
+// InsightsOperator holds cluster-wide information about the Insights Operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsOperator struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Insights.
+ // +kubebuilder:validation:Required
+ Spec InsightsOperatorSpec `json:"spec"`
+
+ // status is the most recently observed status of the Insights operator.
+ // +optional
+ Status InsightsOperatorStatus `json:"status"`
+}
+
+type InsightsOperatorSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type InsightsOperatorStatus struct {
+ OperatorStatus `json:",inline"`
+ // gatherStatus provides basic information about the last Insights data gathering.
+ // When omitted, this means no data gathering has taken place yet.
+ // +optional
+ GatherStatus GatherStatus `json:"gatherStatus,omitempty"`
+ // insightsReport provides general Insights analysis results.
+ // When omitted, this means no data gathering has taken place yet.
+ // +optional
+ InsightsReport InsightsReport `json:"insightsReport,omitempty"`
+}
+
+// gatherStatus provides information about the last known gather event.
+type GatherStatus struct {
+ // lastGatherTime is the last time when Insights data gathering finished.
+ // An empty value means that no data has been gathered yet.
+ // +optional
+ LastGatherTime metav1.Time `json:"lastGatherTime,omitempty"`
+ // lastGatherDuration is the total time taken to process
+ // all gatherers during the last gather event.
+ // +optional
+ // +kubebuilder:validation:Pattern="^0|([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+ // +kubebuilder:validation:Type=string
+ LastGatherDuration metav1.Duration `json:"lastGatherDuration,omitempty"`
+ // gatherers is a list of active gatherers (and their statuses) in the last gathering.
+ // +listType=atomic
+ // +optional
+ Gatherers []GathererStatus `json:"gatherers,omitempty"`
+}
+
+// insightsReport provides Insights health check report based on the most
+// recently sent Insights data.
+type InsightsReport struct {
+ // downloadedAt is the time when the last Insights report was downloaded.
+ // An empty value means that there has not been any Insights report downloaded yet and
+ // it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled).
+ // +optional
+ DownloadedAt metav1.Time `json:"downloadedAt,omitempty"`
+ // healthChecks provides basic information about active Insights health checks
+ // in a cluster.
+ // +listType=atomic
+ // +optional
+ HealthChecks []HealthCheck `json:"healthChecks,omitempty"`
+}
+
+// healthCheck represents an Insights health check attributes.
+type HealthCheck struct {
+ // description provides basic description of the healtcheck.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:MinLength=10
+ Description string `json:"description"`
+ // totalRisk of the healthcheck. Indicator of the total risk posed
+ // by the detected issue; combination of impact and likelihood. The values can be from 1 to 4,
+ // and the higher the number, the more important the issue.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=4
+ TotalRisk int32 `json:"totalRisk"`
+ // advisorURI provides the URL link to the Insights Advisor.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^https:\/\/\S+`
+ AdvisorURI string `json:"advisorURI"`
+ // state determines what the current state of the health check is.
+ // Health check is enabled by default and can be disabled
+ // by the user in the Insights advisor user interface.
+ // +kubebuilder:validation:Required
+ State HealthCheckState `json:"state"`
+}
+
+// healthCheckState provides information about the status of the
+// health check (for example, the health check may be marked as disabled by the user).
+// +kubebuilder:validation:Enum:=Enabled;Disabled
+type HealthCheckState string
+
+const (
+ // enabled marks the health check as enabled
+ HealthCheckEnabled HealthCheckState = "Enabled"
+ // disabled marks the health check as disabled
+ HealthCheckDisabled HealthCheckState = "Disabled"
+)
+
+// gathererStatus represents information about a particular
+// data gatherer.
+type GathererStatus struct {
+ // conditions provide details on the status of each gatherer.
+ // +listType=atomic
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ Conditions []metav1.Condition `json:"conditions"`
+ // name is the name of the gatherer.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:MinLength=5
+ Name string `json:"name"`
+ // lastGatherDuration represents the time spent gathering.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+ LastGatherDuration metav1.Duration `json:"lastGatherDuration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InsightsOperatorList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type InsightsOperatorList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []InsightsOperator `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
new file mode 100644
index 0000000000..5c9d43a2a2
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go
@@ -0,0 +1,83 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubeapiservers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_20,operatorName=kube-apiserver,operatorOrdering=01
+
+// KubeAPIServer provides information to configure an operator to manage kube-apiserver.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +openshift:compatibility-gen:level=1
+type KubeAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes API Server
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeAPIServerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes API Server
+ // +optional
+ Status KubeAPIServerStatus `json:"status"`
+}
+
+type KubeAPIServerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeAPIServerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+
+ // serviceAccountIssuers tracks history of used service account issuers.
+ // The item without expiration time represents the currently used service account issuer.
+ // The other items represents service account issuers that were used previously and are still being trusted.
+ // The default expiration for the items is set by the platform and it defaults to 24h.
+ // see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection
+ // +optional
+ ServiceAccountIssuers []ServiceAccountIssuerStatus `json:"serviceAccountIssuers,omitempty"`
+}
+
+type ServiceAccountIssuerStatus struct {
+ // name is the name of the service account issuer
+ // ---
+ // + This value comes from the serviceAccountIssuer field on the authentication.config.openshift.io/v1 resource.
+ // + As the authentication field is not validated, we cannot apply validation here else this may cause the controller
+ // + to error when trying to update this status field.
+ Name string `json:"name"`
+
+ // expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list
+ // of service account issuers.
+ // +optional
+ ExpirationTime *metav1.Time `json:"expirationTime,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeAPIServerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeAPIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go
new file mode 100644
index 0000000000..93ab209a0d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go
@@ -0,0 +1,68 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubecontrollermanagers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-controller-manager,operatorOrdering=01
+
+// KubeControllerManager provides information to configure an operator to manage kube-controller-manager.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeControllerManager struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes Controller Manager
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeControllerManagerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes Controller Manager
+ // +optional
+ Status KubeControllerManagerStatus `json:"status"`
+}
+
+type KubeControllerManagerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+
+ // useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only
+ // enough certificates to validate service serving certificates.
+ // Once set to true, it cannot be set to false.
+ // Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will
+ // only have the more secure content.
+ // +kubebuilder:default=false
+ UseMoreSecureServiceCA bool `json:"useMoreSecureServiceCA"`
+}
+
+type KubeControllerManagerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeControllerManagerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeControllerManagerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go
new file mode 100644
index 0000000000..470dc5097d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go
@@ -0,0 +1,57 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubestorageversionmigrators,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/503
+// +openshift:file-pattern=cvoRunLevel=0000_40,operatorName=kube-storage-version-migrator,operatorOrdering=00
+
+// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeStorageVersionMigrator struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeStorageVersionMigratorSpec `json:"spec"`
+ // +optional
+ Status KubeStorageVersionMigratorStatus `json:"status"`
+}
+
+type KubeStorageVersionMigratorSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type KubeStorageVersionMigratorStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeStorageVersionMigratorList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeStorageVersionMigratorList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeStorageVersionMigrator `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
new file mode 100644
index 0000000000..8bd41eb69d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go
@@ -0,0 +1,509 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=machineconfigurations,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1453
+// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01
+
+// MachineConfiguration provides information to configure an operator to manage Machine Configuration.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type MachineConfiguration struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Machine Config Operator
+ // +kubebuilder:validation:Required
+ Spec MachineConfigurationSpec `json:"spec"`
+
+ // status is the most recently observed status of the Machine Config Operator
+ // +optional
+ Status MachineConfigurationStatus `json:"status"`
+}
+
+type MachineConfigurationSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+
+ // TODO(jkyros): This is where we put our knobs and dials
+
+ // managedBootImages allows configuration for the management of boot images for machine
+ // resources within the cluster. This configuration allows users to select resources that should
+ // be updated to the latest boot images during cluster upgrades, ensuring that new machines
+ // always boot with the current cluster version's boot image. When omitted, no boot images
+ // will be updated.
+ // +openshift:enable:FeatureGate=ManagedBootImages
+ // +optional
+ ManagedBootImages ManagedBootImages `json:"managedBootImages"`
+
+ // nodeDisruptionPolicy allows an admin to set granular node disruption actions for
+ // MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow
+ // for less downtime when doing small configuration updates to the cluster. This configuration
+ // has no effect on cluster upgrades which will still incur node disruption where required.
+ // +openshift:enable:FeatureGate=NodeDisruptionPolicy
+ // +optional
+ NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"`
+}
+
+type MachineConfigurationStatus struct {
+ // observedGeneration is the last generation change you've dealt with
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // conditions is a list of conditions and their status
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // Previously there was a StaticPodOperatorStatus here for legacy reasons. Many of the fields within
+ // it are no longer relevant for the MachineConfiguration CRD's functions. The following remainder
+ // fields were tombstoned after lifting out StaticPodOperatorStatus. To avoid conflicts with
+ // serialisation, the following field names may never be used again.
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // Version string `json:"version,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // ReadyReplicas int32 `json:"readyReplicas"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // Generations []GenerationStatus `json:"generations,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitempty"`
+
+ // Tombstone: legacy field from StaticPodOperatorStatus
+ // NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"`
+
+ // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are,
+ // and will be used by the Machine Config Daemon during future node updates.
+ // +openshift:enable:FeatureGate=NodeDisruptionPolicy
+ // +optional
+ NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineConfigurationList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type MachineConfigurationList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []MachineConfiguration `json:"items"`
+}
+
+type ManagedBootImages struct {
+ // machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator
+ // will watch for changes to this list. Only one entry is permitted per type of machine management resource.
+ // +optional
+ // +listType=map
+ // +listMapKey=resource
+ // +listMapKey=apiGroup
+ MachineManagers []MachineManager `json:"machineManagers"`
+}
+
+// MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information
+// such as the resource type and the API Group of the resource. It also provides granular control via the selection field.
+type MachineManager struct {
+ // resource is the machine management resource's type.
+ // The only current valid value is machinesets.
+ // machinesets means that the machine manager will only register resources of the kind MachineSet.
+ // +kubebuilder:validation:Required
+ Resource MachineManagerMachineSetsResourceType `json:"resource"`
+
+ // apiGroup is name of the APIGroup that the machine management resource belongs to.
+ // The only current valid value is machine.openshift.io.
+ // machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group.
+ // +kubebuilder:validation:Required
+ APIGroup MachineManagerMachineSetsAPIGroupType `json:"apiGroup"`
+
+ // selection allows granular control of the machine management resources that will be registered for boot image updates.
+ // +kubebuilder:validation:Required
+ Selection MachineManagerSelector `json:"selection"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Partial' ? has(self.partial) : !has(self.partial)",message="Partial is required when type is partial, and forbidden otherwise"
+// +union
+type MachineManagerSelector struct {
+ // mode determines how machine managers will be selected for updates.
+ // Valid values are All and Partial.
+ // All means that every resource matched by the machine manager will be updated.
+ // Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Mode MachineManagerSelectorMode `json:"mode"`
+
+ // partial provides label selector(s) that can be used to match machine management resources.
+ // Only permitted when mode is set to "Partial".
+ // +optional
+ Partial *PartialSelector `json:"partial,omitempty"`
+}
+
+// PartialSelector provides label selector(s) that can be used to match machine management resources.
+type PartialSelector struct {
+ // machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.
+ // +kubebuilder:validation:Required
+ MachineResourceSelector *metav1.LabelSelector `json:"machineResourceSelector,omitempty"`
+}
+
+// MachineManagerSelectorMode is a string enum used in the MachineManagerSelector union discriminator.
+// +kubebuilder:validation:Enum:="All";"Partial"
+type MachineManagerSelectorMode string
+
+const (
+ // All represents a configuration mode that registers all resources specified by the parent MachineManager for boot image updates.
+ All MachineManagerSelectorMode = "All"
+
+ // Partial represents a configuration mode that will register resources specified by the parent MachineManager only
+ // if they match with the label selector.
+ Partial MachineManagerSelectorMode = "Partial"
+)
+
+// MachineManagerMachineSetsResourceType is a string enum used in the MachineManager type to describe the resource
+// type to be registered.
+// +kubebuilder:validation:Enum:="machinesets"
+type MachineManagerMachineSetsResourceType string
+
+const (
+ // MachineSets represent the MachineSet resource type, which manage a group of machines and belong to the OpenShift machine API group.
+ MachineSets MachineManagerMachineSetsResourceType = "machinesets"
+)
+
+// MachineManagerMachineSetsAPIGroupType is a string enum used in the MachineManager type to describe the APIGroup
+// of the resource type being registered.
+// +kubebuilder:validation:Enum:="machine.openshift.io"
+type MachineManagerMachineSetsAPIGroupType string
+
+const (
+ // MachineAPI represent the traditional MAPI Group that a machineset may belong to.
+ // This feature only supports MAPI machinesets at this time.
+ MachineAPI MachineManagerMachineSetsAPIGroupType = "machine.openshift.io"
+)
+
+type NodeDisruptionPolicyStatus struct {
+ // clusterPolicies is a merge of cluster default and user provided node disruption policies.
+ // +optional
+ ClusterPolicies NodeDisruptionPolicyClusterStatus `json:"clusterPolicies"`
+}
+
+// NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys
+type NodeDisruptionPolicyConfig struct {
+ // files is a list of MachineConfig file definitions and actions to take to changes on those paths
+ // This list supports a maximum of 50 entries.
+ // +optional
+ // +listType=map
+ // +listMapKey=path
+ // +kubebuilder:validation:MaxItems=50
+ Files []NodeDisruptionPolicySpecFile `json:"files"`
+ // units is a list of MachineConfig unit definitions and actions to take on changes to those services
+ // This list supports a maximum of 50 entries.
+ // +optional
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=50
+ Units []NodeDisruptionPolicySpecUnit `json:"units"`
+ // sshkey maps to the ignition.sshkeys field in the MachineConfig object, defining an action for this
+ // will apply to all sshkey changes in the cluster
+ // +optional
+ SSHKey NodeDisruptionPolicySpecSSHKey `json:"sshkey"`
+}
+
+// NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a
+// merge of cluster defaults and user provided policies
+type NodeDisruptionPolicyClusterStatus struct {
+ // files is a list of MachineConfig file definitions and actions to take to changes on those paths
+ // +optional
+ // +listType=map
+ // +listMapKey=path
+ // +kubebuilder:validation:MaxItems=100
+ Files []NodeDisruptionPolicyStatusFile `json:"files,omitempty"`
+ // units is a list of MachineConfig unit definitions and actions to take on changes to those services
+ // +optional
+ // +listType=map
+ // +listMapKey=name
+ // +kubebuilder:validation:MaxItems=100
+ Units []NodeDisruptionPolicyStatusUnit `json:"units,omitempty"`
+ // sshkey is the overall sshkey MachineConfig definition
+ // +optional
+ SSHKey NodeDisruptionPolicyStatusSSHKey `json:"sshkey,omitempty"`
+}
+
+// NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecFile struct {
+ // path is the location of a file being managed through a MachineConfig.
+ // The Actions in the policy will apply to changes to the file at this path.
+ // +kubebuilder:validation:Required
+ Path string `json:"path"`
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusFile struct {
+ // path is the location of a file being managed through a MachineConfig.
+ // The Actions in the policy will apply to changes to the file at this path.
+ // +kubebuilder:validation:Required
+ Path string `json:"path"`
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecUnit struct {
+ // name represents the service name of a systemd service managed through a MachineConfig
+ // Actions specified will be applied for changes to the named service.
+ // Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+ // ${NAME} must be at least 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\".
+ // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+ // +kubebuilder:validation:Required
+ Name NodeDisruptionPolicyServiceName `json:"name"`
+
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusUnit struct {
+ // name represents the service name of a systemd service managed through a MachineConfig
+ // Actions specified will be applied for changes to the named service.
+ // Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+ // ${NAME} must be at least 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\".
+ // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+ // +kubebuilder:validation:Required
+ Name NodeDisruptionPolicyServiceName `json:"name"`
+
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// NodeDisruptionPolicySpecSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object
+type NodeDisruptionPolicySpecSSHKey struct {
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicySpecAction `json:"actions"`
+}
+
+// NodeDisruptionPolicyStatusSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object
+type NodeDisruptionPolicyStatusSSHKey struct {
+ // actions represents the series of commands to be executed on changes to the file at
+ // the corresponding file path. Actions will be applied in the order that
+ // they are set in this list. If there are other incoming changes to other MachineConfig
+ // entries in the same update that require a reboot, the reboot will supersede these actions.
+ // Valid actions are Reboot, Drain, Reload, DaemonReload and None.
+ // The Reboot action and the None action cannot be used in conjunction with any of the other actions.
+ // This list supports a maximum of 10 entries.
+ // +kubebuilder:validation:Required
+ // +listType=atomic
+ // +kubebuilder:validation:MaxItems=10
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions"
+ // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='None') ? size(self) == 1 : true", message="None action can only be specified standalone, as it will override any other actions"
+ Actions []NodeDisruptionPolicyStatusAction `json:"actions"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicySpecAction struct {
+ // type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed
+ // Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None.
+ // reload/restart requires a corresponding service target specified in the reload/restart field.
+ // Other values require no further configuration
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Type NodeDisruptionPolicySpecActionType `json:"type"`
+ // reload specifies the service to reload, only valid if type is reload
+ // +optional
+ Reload *ReloadService `json:"reload,omitempty"`
+ // restart specifies the service to restart, only valid if type is restart
+ // +optional
+ Restart *RestartService `json:"restart,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Reload' ? has(self.reload) : !has(self.reload)",message="reload is required when type is Reload, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Restart' ? has(self.restart) : !has(self.restart)",message="restart is required when type is Restart, and forbidden otherwise"
+// +union
+type NodeDisruptionPolicyStatusAction struct {
+ // type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed
+ // Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special.
+ // reload/restart requires a corresponding service target specified in the reload/restart field.
+ // Other values require no further configuration
+ // +unionDiscriminator
+ // +kubebuilder:validation:Required
+ Type NodeDisruptionPolicyStatusActionType `json:"type"`
+ // reload specifies the service to reload, only valid if type is reload
+ // +optional
+ Reload *ReloadService `json:"reload,omitempty"`
+ // restart specifies the service to restart, only valid if type is restart
+ // +optional
+ Restart *RestartService `json:"restart,omitempty"`
+}
+
+// ReloadService allows the user to specify the services to be reloaded
+type ReloadService struct {
+ // serviceName is the full name (e.g. crio.service) of the service to be reloaded
+ // Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+ // ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+ // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+ // +kubebuilder:validation:Required
+ ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// RestartService allows the user to specify the services to be restarted
+type RestartService struct {
+ // serviceName is the full name (e.g. crio.service) of the service to be restarted
+ // Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long.
+ // ${NAME} must be at least 1 character long and can only consist of letters, digits, ":", "-", "_", ".", and "\".
+ // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope".
+ // +kubebuilder:validation:Required
+ ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"`
+}
+
+// NodeDisruptionPolicySpecActionType is a string enum used in a NodeDisruptionPolicySpecAction object. They describe an action to be performed.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None"
+type NodeDisruptionPolicySpecActionType string
+
+// +kubebuilder:validation:XValidation:rule=`self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$')`, message="Invalid ${SERVICETYPE} in service name. Expected format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\",\".snapshot\", \".slice\" or \".scope\"."
+// +kubebuilder:validation:XValidation:rule=`self.matches('^[a-zA-Z0-9:._\\\\-]+\\..')`, message="Invalid ${NAME} in service name. Expected format is ${NAME}${SERVICETYPE}, where {NAME} must be atleast 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\\\""
+// +kubebuilder:validation:MaxLength=255
+type NodeDisruptionPolicyServiceName string
+
+const (
+ // Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+ // if a reboot policy is not found for a change/update being performed by the MCO.
+ RebootSpecAction NodeDisruptionPolicySpecActionType = "Reboot"
+
+ // Drain represents an action that will cause nodes to be drained of their workloads.
+ DrainSpecAction NodeDisruptionPolicySpecActionType = "Drain"
+
+ // Reload represents an action that will cause nodes to reload the service described by the Target field.
+ ReloadSpecAction NodeDisruptionPolicySpecActionType = "Reload"
+
+ // Restart represents an action that will cause nodes to restart the service described by the Target field.
+ RestartSpecAction NodeDisruptionPolicySpecActionType = "Restart"
+
+ // DaemonReload represents an action that will cause nodes to reload the systemd unit configuration (daemon-reload).
+ DaemonReloadSpecAction NodeDisruptionPolicySpecActionType = "DaemonReload"
+
+ // None represents an action that no handling is required by the MCO.
+ NoneSpecAction NodeDisruptionPolicySpecActionType = "None"
+)
+
+// NodeDisruptionPolicyStatusActionType is a string enum used in a NodeDisruptionPolicyStatusAction object. They describe an action to be performed.
+ // The key difference of this object from NodeDisruptionPolicySpecActionType is that there is an additional SpecialStatusAction value in this enum. This will only be
+// used by the MCO's controller to indicate some internal actions. They are not part of the NodeDisruptionPolicyConfig object and cannot be set by the user.
+// +kubebuilder:validation:Enum:="Reboot";"Drain";"Reload";"Restart";"DaemonReload";"None";"Special"
+type NodeDisruptionPolicyStatusActionType string
+
+const (
+ // Reboot represents an action that will cause nodes to be rebooted. This is the default action by the MCO
+ // if a reboot policy is not found for a change/update being performed by the MCO.
+ RebootStatusAction NodeDisruptionPolicyStatusActionType = "Reboot"
+
+ // Drain represents an action that will cause nodes to be drained of their workloads.
+ DrainStatusAction NodeDisruptionPolicyStatusActionType = "Drain"
+
+ // Reload represents an action that will cause nodes to reload the service described by the Target field.
+ ReloadStatusAction NodeDisruptionPolicyStatusActionType = "Reload"
+
+ // Restart represents an action that will cause nodes to restart the service described by the Target field.
+ RestartStatusAction NodeDisruptionPolicyStatusActionType = "Restart"
+
+ // DaemonReload represents an action that will cause nodes to reload the systemd unit configuration (daemon-reload).
+ DaemonReloadStatusAction NodeDisruptionPolicyStatusActionType = "DaemonReload"
+
+ // None represents an action that no handling is required by the MCO.
+ NoneStatusAction NodeDisruptionPolicyStatusActionType = "None"
+
+ // Special represents an action that is internal to the MCO, and is not allowed in user defined NodeDisruption policies.
+ SpecialStatusAction NodeDisruptionPolicyStatusActionType = "Special"
+)
+
+// These strings will be used for MachineConfiguration Status conditions.
+const (
+ // MachineConfigurationBootImageUpdateDegraded means that the MCO ran into an error while reconciling boot images. This
+ // will cause the clusteroperators.config.openshift.io/machine-config to degrade. This condition will indicate the cause
+ // of the degrade, the progress of the update and the generation of the boot images configmap that it degraded on.
+ MachineConfigurationBootImageUpdateDegraded string = "BootImageUpdateDegraded"
+
+ // MachineConfigurationBootImageUpdateProgressing means that the MCO is in the process of reconciling boot images. This
+ // will cause the clusteroperators.config.openshift.io/machine-config to be in a Progressing state. This condition will
+ // indicate the progress of the update and the generation of the boot images configmap that triggered this update.
+ MachineConfigurationBootImageUpdateProgressing string = "BootImageUpdateProgressing"
+)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go
new file mode 100644
index 0000000000..35bb5ada36
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_network.go
@@ -0,0 +1,790 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=networks,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_70,operatorName=network,operatorOrdering=01
+
+// Network describes the cluster's desired network configuration. It is
+// consumed by the cluster-network-operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:openapi-gen=true
+// +openshift:compatibility-gen:level=1
+type Network struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec NetworkSpec `json:"spec,omitempty"`
+ Status NetworkStatus `json:"status,omitempty"`
+}
+
+// NetworkStatus is detailed operator status, which is distilled
+// up to the Network clusteroperator object.
+type NetworkStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkList contains a list of Network configurations
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []Network `json:"items"`
+}
+
+// NetworkSpec is the top-level network configuration object.
+// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'"
+type NetworkSpec struct {
+ OperatorSpec `json:",inline"`
+
+ // clusterNetwork is the IP address pool to use for pod IPs.
+ // Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks.
+ // Others only support one. This is equivalent to the cluster-cidr.
+ ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+ // serviceNetwork is the ip address pool to use for Service IPs
+ // Currently, all existing network providers only support a single value
+ // here, but this is an array to allow for growth.
+ ServiceNetwork []string `json:"serviceNetwork"`
+
+ // defaultNetwork is the "default" network that all pods will receive
+ DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
+
+ // additionalNetworks is a list of extra networks to make available to pods
+ // when multiple networks are enabled.
+ AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+ // disableMultiNetwork specifies whether or not multiple pod network
+ // support should be disabled. If unset, this property defaults to
+ // 'false' and multiple network support is enabled.
+ DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+ // useMultiNetworkPolicy enables a controller which allows for
+ // MultiNetworkPolicy objects to be used on additional networks as
+ // created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy
+ // objects, but NetworkPolicy objects only apply to the primary interface.
+ // With MultiNetworkPolicy, you can control the traffic that a pod can receive
+ // over the secondary interfaces. If unset, this property defaults to 'false'
+ // and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is
+ // 'true' then the value of this field is ignored.
+ UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
+
+ // deployKubeProxy specifies whether or not a standalone kube-proxy should
+ // be deployed by the operator. Some network providers include kube-proxy
+ // or similar functionality. If unset, the plugin will attempt to select
+ // the correct value, which is false when OpenShift SDN and ovn-kubernetes are
+ // used and true otherwise.
+ // +optional
+ DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+ // disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck
+ // CRs from a test pod to every node, apiserver and LB should be disabled or not.
+ // If unset, this property defaults to 'false' and network diagnostics is enabled.
+ // Setting this to 'true' would reduce the additional load of the pods performing the checks.
+ // +optional
+ // +kubebuilder:default:=false
+ DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
+
+ // kubeProxyConfig lets us configure desired proxy configuration.
+ // If not specified, sensible defaults will be chosen by OpenShift directly.
+ // Not consumed by all network providers - currently only openshift-sdn.
+ KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+ // exportNetworkFlows enables and configures the export of network flow metadata from the pod network
+ // by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin.
+ // If unset, flows will not be exported to any collector.
+ // +optional
+ ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
+
+ // migration enables and configures the cluster network migration. The
+ // migration procedure allows to change the network type and the MTU.
+ // +optional
+ Migration *NetworkMigration `json:"migration,omitempty"`
+}
+
+// NetworkMigrationMode is an enumeration of the possible mode of the network migration
+// Valid values are "Live", "Offline" and omitted.
+// +kubebuilder:validation:Enum:=Live;Offline;""
+type NetworkMigrationMode string
+
+const (
+ // A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration.
+ LiveNetworkMigrationMode NetworkMigrationMode = "Live"
+ // An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration.
+ OfflineNetworkMigrationMode NetworkMigrationMode = "Offline"
+)
+
+// NetworkMigration represents the cluster network configuration.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=NetworkLiveMigration,rule="!has(self.mtu) || !has(self.networkType) || self.networkType == \"\" || has(self.mode) && self.mode == 'Live'",message="networkType migration in mode other than 'Live' may not be configured at the same time as mtu migration"
+type NetworkMigration struct {
+ // networkType is the target type of network migration. Set this to the
+ // target network type to allow changing the default network. If unset, the
+ // operation of changing cluster default network plugin will be rejected.
+ // The supported values are OpenShiftSDN, OVNKubernetes
+ // +optional
+ NetworkType string `json:"networkType,omitempty"`
+
+ // mtu contains the MTU migration configuration. Set this to allow changing
+ // the MTU values for the default network. If unset, the operation of
+ // changing the MTU for the default network will be rejected.
+ // +optional
+ MTU *MTUMigration `json:"mtu,omitempty"`
+
+ // features contains the features migration configuration. Set this to migrate
+ // feature configuration when changing the cluster default network provider.
+ // if unset, the default operation is to migrate all the configuration of
+ // supported features.
+ // +optional
+ Features *FeaturesMigration `json:"features,omitempty"`
+
+ // mode indicates the mode of network migration.
+ // The supported values are "Live", "Offline" and omitted.
+ // A "Live" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration.
+ // An "Offline" migration operation will cause service interruption. During an "Offline" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+ // The current default value is "Offline".
+ // +optional
+ Mode NetworkMigrationMode `json:"mode"`
+}
+
+type FeaturesMigration struct {
+ // egressIP specifies whether or not the Egress IP configuration is migrated
+ // automatically when changing the cluster default network provider.
+ // If unset, this property defaults to 'true' and Egress IP configure is migrated.
+ // +optional
+ // +kubebuilder:default:=true
+ EgressIP bool `json:"egressIP,omitempty"`
+ // egressFirewall specifies whether or not the Egress Firewall configuration is migrated
+ // automatically when changing the cluster default network provider.
+ // If unset, this property defaults to 'true' and Egress Firewall configure is migrated.
+ // +optional
+ // +kubebuilder:default:=true
+ EgressFirewall bool `json:"egressFirewall,omitempty"`
+ // multicast specifies whether or not the multicast configuration is migrated
+ // automatically when changing the cluster default network provider.
+ // If unset, this property defaults to 'true' and multicast configure is migrated.
+ // +optional
+ // +kubebuilder:default:=true
+ Multicast bool `json:"multicast,omitempty"`
+}
+
+ // MTUMigration contains information about MTU migration.
+type MTUMigration struct {
+ // network contains information about MTU migration for the default network.
+ // Migrations are only allowed to MTU values lower than the machine's uplink
+ // MTU by the minimum appropriate offset.
+ // +optional
+ Network *MTUMigrationValues `json:"network,omitempty"`
+
+ // machine contains MTU migration configuration for the machine's uplink.
+ // Needs to be migrated along with the default network MTU unless the
+ // current uplink MTU already accommodates the default network MTU.
+ // +optional
+ Machine *MTUMigrationValues `json:"machine,omitempty"`
+}
+
+// MTUMigrationValues contains the values for a MTU migration.
+type MTUMigrationValues struct {
+ // to is the MTU to migrate to.
+ // +kubebuilder:validation:Minimum=0
+ To *uint32 `json:"to"`
+
+ // from is the MTU to migrate from.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ From *uint32 `json:"from,omitempty"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
+// the HostPrefix field is not used by the plugin, it can be left unset.
+// Not all network providers support multiple ClusterNetworks
+type ClusterNetworkEntry struct {
+ CIDR string `json:"cidr"`
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+ // type is the type of network
+ // All NetworkTypes are supported except for NetworkTypeRaw
+ Type NetworkType `json:"type"`
+
+ // openShiftSDNConfig configures the openshift-sdn plugin
+ // +optional
+ OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+ // ovnKubernetesConfig configures the ovn-kubernetes plugin.
+ // +optional
+ OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for macvlan interface.
+type SimpleMacvlanConfig struct {
+ // master is the host interface to create the macvlan interface from.
+ // If not specified, it will be default route interface
+ // +optional
+ Master string `json:"master,omitempty"`
+
+ // IPAMConfig configures the IPAM module to be used for IP Address Management (IPAM).
+ // +optional
+ IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+ // mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+ // +optional
+ Mode MacvlanMode `json:"mode,omitempty"`
+
+ // mtu is the mtu to use for the macvlan interface. if unset, host's
+ // kernel will select the value.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+ // Address is the IP address in CIDR format
+ // +optional
+ Address string `json:"address"`
+ // Gateway is IP inside of subnet to designate as the gateway
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+ // Destination points the IP route destination
+ Destination string `json:"destination"`
+ // Gateway is the route's next-hop IP address
+ // If unset, a default gateway is assumed (as determined by the CNI plugin).
+ // +optional
+ Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS related information for static IPAM
+type StaticIPAMDNS struct {
+ // Nameservers points DNS servers for IP lookup
+ // +optional
+ Nameservers []string `json:"nameservers,omitempty"`
+ // Domain configures the domainname the local domain used for short hostname lookups
+ // +optional
+ Domain string `json:"domain,omitempty"`
+ // Search configures priority ordered search domains for short hostname lookups
+ // +optional
+ Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+ // Addresses configures IP address for the interface
+ // +optional
+ Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+ // Routes configures IP routes for the interface
+ // +optional
+ Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+ // DNS configures DNS for the interface
+ // +optional
+ DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+ // Type is the type of IPAM module to be used for IP Address Management (IPAM).
+ // The supported values are IPAMTypeDHCP, IPAMTypeStatic
+ Type IPAMType `json:"type"`
+
+ // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic
+ // +optional
+ StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request them by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+ // type is the type of network
+ // The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+ Type NetworkType `json:"type"`
+
+ // name is the name of the network. This will be populated in the resulting CRD
+ // This must be unique.
+ Name string `json:"name"`
+
+ // namespace is the namespace of the network. This will be populated in the resulting CRD
+ // If not given the network will be created in the default namespace.
+ Namespace string `json:"namespace,omitempty"`
+
+ // rawCNIConfig is the raw CNI configuration json to create in the
+ // NetworkAttachmentDefinition CRD
+ RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+ // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan
+ // +optional
+ SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig configures the three openshift-sdn plugins
+type OpenShiftSDNConfig struct {
+ // mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+ Mode SDNMode `json:"mode"`
+
+ // vxlanPort is the port to use for all vxlan packets. The default is 4789.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+ // mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset.
+ // This must be 50 bytes smaller than the machine's uplink.
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+
+ // useExternalOpenvswitch used to control whether the operator would deploy an OVS
+ // DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always
+ // run as a system service, and this flag is ignored.
+ // DEPRECATED: non-functional as of 4.6
+ // +optional
+ UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+ // enableUnidling controls whether or not the service proxy will support idling
+ // and unidling of services. By default, unidling is enabled.
+ EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+ // OVNKubernetesConfig contains the configuration parameters for networks
+ // using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+ // mtu is the MTU to use for the tunnel interface. This must be 100
+ // bytes smaller than the uplink mtu.
+ // Default is 1400
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MTU *uint32 `json:"mtu,omitempty"`
+ // genevePort is the UDP port to be used by Geneve encapsulation.
+ // Default is 6081
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ GenevePort *uint32 `json:"genevePort,omitempty"`
+ // HybridOverlayConfig configures an additional overlay network for peers that are
+ // not using OVN.
+ // +optional
+ HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+ // ipsecConfig enables and configures IPsec for pods on the pod network within the
+ // cluster.
+ // +optional
+ // +kubebuilder:default={"mode": "Disabled"}
+ // +default={"mode": "Disabled"}
+ IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
+ // policyAuditConfig is the configuration for network policy audit events. If unset,
+ // reported defaults are used.
+ // +optional
+ PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
+ // gatewayConfig holds the configuration for node gateway options.
+ // +optional
+ GatewayConfig *GatewayConfig `json:"gatewayConfig,omitempty"`
+ // v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+ // default one is being already used by something else. It must not overlap with
+ // any other subnet being used by OpenShift or by the node network. The size of the
+ // subnet must be larger than the number of nodes. The value cannot be changed
+ // after installation.
+ // Default is 100.64.0.0/16
+ // +optional
+ V4InternalSubnet string `json:"v4InternalSubnet,omitempty"`
+ // v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+ // default one is being already used by something else. It must not overlap with
+ // any other subnet being used by OpenShift or by the node network. The size of the
+ // subnet must be larger than the number of nodes. The value cannot be changed
+ // after installation.
+ // Default is fd98::/48
+ // +optional
+ V6InternalSubnet string `json:"v6InternalSubnet,omitempty"`
+ // egressIPConfig holds the configuration for EgressIP options.
+ // +optional
+ EgressIPConfig EgressIPConfig `json:"egressIPConfig,omitempty"`
+ // ipv4 allows users to configure IP settings for IPv4 connections. When omitted,
+ // this means no opinions and the default configuration is used. Check individual
+ // fields within ipv4 for details of default values.
+ // +optional
+ IPv4 *IPv4OVNKubernetesConfig `json:"ipv4,omitempty"`
+ // ipv6 allows users to configure IP settings for IPv6 connections. When omitted,
+ // this means no opinions and the default configuration is used. Check individual
+ // fields within ipv6 for details of default values.
+ // +optional
+ IPv6 *IPv6OVNKubernetesConfig `json:"ipv6,omitempty"`
+}
+
+type IPv4OVNKubernetesConfig struct {
+ // internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally
+ // by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+ // architecture that connects the cluster routers on each node together to enable
+ // east west traffic. The subnet chosen should not overlap with other networks
+ // specified for OVN-Kubernetes as well as other networks used on the host.
+ // The value cannot be changed after installation.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable
+ // default which is subject to change over time.
+ // The current default subnet is 100.88.0.0/16
+ // The subnet must be large enough to accommodate one IP per node in your cluster
+ // The value must be in proper IPV4 CIDR format
+ // +kubebuilder:validation:MaxLength=18
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+ // +optional
+ InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+ // internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the
+ // default one is being already used by something else. It must not overlap with
+ // any other subnet being used by OpenShift or by the node network. The size of the
+ // subnet must be larger than the number of nodes. The value cannot be changed
+ // after installation.
+ // The current default value is 100.64.0.0/16
+ // The subnet must be large enough to accommodate one IP per node in your cluster
+ // The value must be in proper IPV4 CIDR format
+ // +kubebuilder:validation:MaxLength=18
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 30",message="subnet must be in the range /0 to /30 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+ // +optional
+ InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type IPv6OVNKubernetesConfig struct {
+ // internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally
+ // by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect
+ // architecture that connects the cluster routers on each node together to enable
+ // east west traffic. The subnet chosen should not overlap with other networks
+ // specified for OVN-Kubernetes as well as other networks used on the host.
+ // The value cannot be changed after installation.
+ // When omitted, this means no opinion and the platform is left to choose a reasonable
+ // default which is subject to change over time.
+ // The subnet must be large enough to accommodate one IP per node in your cluster
+ // The current default subnet is fd97::/64
+ // The value must be in proper IPV6 CIDR format
+ // Note that IPV6 dual addresses are not permitted
+ // +kubebuilder:validation:MaxLength=48
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+ // +optional
+ InternalTransitSwitchSubnet string `json:"internalTransitSwitchSubnet,omitempty"`
+ // internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the
+ // default one is being already used by something else. It must not overlap with
+ // any other subnet being used by OpenShift or by the node network. The size of the
+ // subnet must be larger than the number of nodes. The value cannot be changed
+ // after installation.
+ // The subnet must be large enough to accommodate one IP per node in your cluster
+ // The current default value is fd98::/48
+ // The value must be in proper IPV6 CIDR format
+ // Note that IPV6 dual addresses are not permitted
+ // +kubebuilder:validation:MaxLength=48
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+ // +optional
+ InternalJoinSubnet string `json:"internalJoinSubnet,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+ // HybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+ HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+ // HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
+ // Default is 4789
+ // +optional
+ HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
+}
+
+// +kubebuilder:validation:XValidation:rule="self == oldSelf || has(self.mode)",message="ipsecConfig.mode is required"
+type IPsecConfig struct {
+ // mode defines the behaviour of the ipsec configuration within the platform.
+ // Valid values are `Disabled`, `External` and `Full`.
+ // When 'Disabled', ipsec will not be enabled at the node level.
+ // When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters.
+ // This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator.
+ // When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured.
+ // Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays),
+ // this is left to the user to configure.
+ // +kubebuilder:validation:Enum=Disabled;External;Full
+ // +optional
+ Mode IPsecMode `json:"mode,omitempty"`
+}
+
+type IPForwardingMode string
+
+const (
+ // IPForwardingRestricted limits the IP forwarding on OVN-Kube managed interfaces (br-ex, br-ex1) to only required
+ // service and other k8s related traffic
+ IPForwardingRestricted IPForwardingMode = "Restricted"
+
+ // IPForwardingGlobal allows all IP traffic to be forwarded across OVN-Kube managed interfaces
+ IPForwardingGlobal IPForwardingMode = "Global"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+ // RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port
+ // into the host before sending it out. If this is not set, traffic will always egress directly
+ // from OVN to outside without touching the host stack. Setting this to true means hardware
+ // offload will not be supported. Default is false if GatewayConfig is specified.
+ // +kubebuilder:default:=false
+ // +optional
+ RoutingViaHost bool `json:"routingViaHost,omitempty"`
+ // IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex).
+ // By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other
+ // IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across
+ // OVN-Kubernetes managed interfaces, then set this field to "Global".
+ // The supported values are "Restricted" and "Global".
+ // +optional
+ IPForwarding IPForwardingMode `json:"ipForwarding,omitempty"`
+ // ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default
+ // configuration is used. Check individual members fields within ipv4 for details of default values.
+ // +optional
+ IPv4 IPv4GatewayConfig `json:"ipv4,omitempty"`
+ // ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default
+ // configuration is used. Check individual members fields within ipv6 for details of default values.
+ // +optional
+ IPv6 IPv6GatewayConfig `json:"ipv6,omitempty"`
+}
+
+// IPV4GatewayConfig holds the configuration parameters for IPV4 connections in the GatewayConfig for OVN-Kubernetes
+type IPv4GatewayConfig struct {
+ // internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by
+ // ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+ // addresses, as well as the shared gateway bridge interface. The values can be changed after
+ // installation. The subnet chosen should not overlap with other networks specified for
+ // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+ // be large enough to accommodate 6 IPs (maximum prefix length /29).
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+ // The current default subnet is 169.254.169.0/29
+ // The value must be in proper IPV4 CIDR format
+ // +kubebuilder:validation:MaxLength=18
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 29",message="subnet must be in the range /0 to /29 inclusive"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && int(self.split('.')[0]) > 0",message="first IP address octet must not be 0"
+ // +optional
+ InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+// IPV6GatewayConfig holds the configuration parameters for IPV6 connections in the GatewayConfig for OVN-Kubernetes
+type IPv6GatewayConfig struct {
+ // internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by
+ // ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these
+ // addresses, as well as the shared gateway bridge interface. The values can be changed after
+ // installation. The subnet chosen should not overlap with other networks specified for
+ // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must
+ // be large enough to accommodate 6 IPs (maximum prefix length /125).
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
+ // The current default subnet is fd69::/125
+ // Note that IPV6 dual addresses are not permitted
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format"
+ // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive"
+ // +optional
+ InternalMasqueradeSubnet string `json:"internalMasqueradeSubnet,omitempty"`
+}
+
+type ExportNetworkFlows struct {
+ // netFlow defines the NetFlow configuration.
+ // +optional
+ NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
+ // sFlow defines the SFlow configuration.
+ // +optional
+ SFlow *SFlowConfig `json:"sFlow,omitempty"`
+ // ipfix defines IPFIX configuration.
+ // +optional
+ IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
+}
+
+type NetFlowConfig struct {
+ // netFlow defines the NetFlow collectors that will consume the flow data exported from OVS.
+ // It is a list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type SFlowConfig struct {
+ // sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type IPFIXConfig struct {
+ // ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:MaxItems=10
+ Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+type IPPort string
+
+type PolicyAuditConfig struct {
+ // rateLimit is the approximate maximum number of messages to generate per-second per-node. If
+ // unset the default of 20 msg/sec is used.
+ // +kubebuilder:default=20
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ RateLimit *uint32 `json:"rateLimit,omitempty"`
+
+ // maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs
+ // Units are in MB and the Default is 50MB
+ // +kubebuilder:default=50
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
+
+ // maxLogFiles specifies the maximum number of ACL_audit log files that can be present.
+ // +kubebuilder:default=5
+ // +kubebuilder:validation:Minimum=1
+ // +optional
+ MaxLogFiles *int32 `json:"maxLogFiles,omitempty"`
+
+ // destination is the location for policy log messages.
+ // Regardless of this config, persistent logs will always be dumped to the host
+ // at /var/log/ovn/ however
+ // Additionally syslog output may be configured as follows.
+ // Valid values are:
+ // - "libc" -> to use the libc syslog() function of the host node's journald process
+ // - "udp:host:port" -> for sending syslog over UDP
+ // - "unix:file" -> for using the UNIX domain socket directly
+ // - "null" -> to discard all messages logged to syslog
+ // The default is "null"
+ // +kubebuilder:default=null
+ // +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
+ // +optional
+ Destination string `json:"destination,omitempty"`
+
+ // syslogFacility the RFC5424 facility for generated messages, e.g. "kern". Default is "local0"
+ // +kubebuilder:default=local0
+ // +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7
+ // +optional
+ SyslogFacility string `json:"syslogFacility,omitempty"`
+}
+
+// NetworkType describes the network plugin type to configure
+type NetworkType string
+
+// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
+type ProxyArgumentList []string
+
+// ProxyConfig defines the configuration knobs for kubeproxy
+// All of these are optional and have sensible defaults
+type ProxyConfig struct {
+ // An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted
+ // in large clusters for performance reasons, but this is no longer necessary, and there is no reason
+ // to change this from the default value.
+ // Default: 30s
+ IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
+
+ // The address to "bind" on
+ // Defaults to 0.0.0.0
+ BindAddress string `json:"bindAddress,omitempty"`
+
+ // Any additional arguments to pass to the kubeproxy process
+ ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
+}
+
+// EgressIPConfig defines the configuration knobs for egressip
+type EgressIPConfig struct {
+ // reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds.
+ // If the EgressIP node cannot be reached within this timeout, the node is declared down.
+ // Setting a large value may cause the EgressIP feature to react slowly to node changes.
+ // In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable.
+ // When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+ // The current default is 1 second.
+ // A value of 0 disables the EgressIP node's reachability check.
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=60
+ // +optional
+ ReachabilityTotalTimeoutSeconds *uint32 `json:"reachabilityTotalTimeoutSeconds,omitempty"`
+}
+
+const (
+ // NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured
+ NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
+
+ // NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured.
+ // This is currently not implemented.
+ NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
+
+ // NetworkTypeRaw
+ NetworkTypeRaw NetworkType = "Raw"
+
+ // NetworkTypeSimpleMacvlan
+ NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
+)
+
+// SDNMode is the Mode the openshift-sdn plugin is in
+type SDNMode string
+
+const (
+ // SDNModeSubnet is a simple mode that offers no isolation between pods
+ SDNModeSubnet SDNMode = "Subnet"
+
+ // SDNModeMultitenant is a special "multitenant" mode that offers limited
+ // isolation configuration between namespaces
+ SDNModeMultitenant SDNMode = "Multitenant"
+
+ // SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
+ // for sophisticated network isolation and segmenting. This is the default.
+ SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
+)
+
+// MacvlanMode is the Mode of macvlan. The value are lowercase to match the CNI plugin
+// config values. See "man ip-link" for its detail.
+type MacvlanMode string
+
+const (
+ // MacvlanModeBridge is the macvlan with thin bridge function.
+ MacvlanModeBridge MacvlanMode = "Bridge"
+ // MacvlanModePrivate
+ MacvlanModePrivate MacvlanMode = "Private"
+ // MacvlanModeVEPA is used with Virtual Ethernet Port Aggregator
+ // (802.1qbg) switch
+ MacvlanModeVEPA MacvlanMode = "VEPA"
+ // MacvlanModePassthru
+ MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+ // IPAMTypeDHCP uses DHCP for IP management
+ IPAMTypeDHCP IPAMType = "DHCP"
+ // IPAMTypeStatic uses static IP
+ IPAMTypeStatic IPAMType = "Static"
+)
+
+// IPsecMode enumerates the modes for IPsec configuration
+type IPsecMode string
+
+const (
+ // IPsecModeDisabled disables IPsec altogether
+ IPsecModeDisabled IPsecMode = "Disabled"
+ // IPsecModeExternal enables IPsec on the node level, but expects the user to configure it using k8s-nmstate or
+ // other means - it is most useful for secure communication from the cluster to external endpoints
+ IPsecModeExternal IPsecMode = "External"
+ // IPsecModeFull enables IPsec on the node level (the same as IPsecModeExternal), and configures it to secure communication
+ // between pods on the cluster network.
+ IPsecModeFull IPsecMode = "Full"
+)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
new file mode 100644
index 0000000000..3ae83e6948
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
@@ -0,0 +1,67 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=openshiftapiservers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_30,operatorName=openshift-apiserver,operatorOrdering=01
+
+// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OpenShiftAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the OpenShift API Server.
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftAPIServerSpec `json:"spec"`
+
+ // status defines the observed status of the OpenShift API Server.
+ // +optional
+ Status OpenShiftAPIServerStatus `json:"status"`
+}
+
+type OpenShiftAPIServerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftAPIServerStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableRevision is the latest revision used as suffix of revisioned
+ // secrets like encryption-config. A new revision causes a new deployment of
+ // pods.
+ // +optional
+ // +kubebuilder:validation:Minimum=0
+ LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OpenShiftAPIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
new file mode 100644
index 0000000000..8e8929a903
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
@@ -0,0 +1,57 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=openshiftcontrollermanagers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=openshift-controller-manager,operatorOrdering=02
+
+// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OpenShiftControllerManager struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OpenShiftControllerManagerSpec `json:"spec"`
+ // +optional
+ Status OpenShiftControllerManagerStatus `json:"status"`
+}
+
+type OpenShiftControllerManagerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type OpenShiftControllerManagerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManagerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type OpenShiftControllerManagerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OpenShiftControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
new file mode 100644
index 0000000000..448c458c19
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
@@ -0,0 +1,60 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=kubeschedulers,scope=Cluster,categories=coreoperators
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=kube-scheduler,operatorOrdering=01
+
+// KubeScheduler provides information to configure an operator to manage scheduler.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeScheduler struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec is the specification of the desired behavior of the Kubernetes Scheduler
+ // +kubebuilder:validation:Required
+ // +required
+ Spec KubeSchedulerSpec `json:"spec"`
+
+ // status is the most recently observed status of the Kubernetes Scheduler
+ // +optional
+ Status KubeSchedulerStatus `json:"status"`
+}
+
+type KubeSchedulerSpec struct {
+ StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeSchedulerStatus struct {
+ StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeSchedulerList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type KubeSchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []KubeScheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
new file mode 100644
index 0000000000..e4d8d1d7ad
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -0,0 +1,59 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=servicecas,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/475
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=service-ca,operatorOrdering=02
+
+// ServiceCA provides information to configure an operator to manage the service cert controllers
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCA struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ServiceCASpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ServiceCAStatus `json:"status"`
+}
+
+type ServiceCASpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type ServiceCAStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCAList is a collection of items
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCAList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []ServiceCA `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go
new file mode 100644
index 0000000000..006b8bb99d
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go
@@ -0,0 +1,54 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server
+// DEPRECATED: will be removed in 4.6
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCatalogAPIServer struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ServiceCatalogAPIServerSpec `json:"spec"`
+ // +optional
+ Status ServiceCatalogAPIServerStatus `json:"status"`
+}
+
+type ServiceCatalogAPIServerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type ServiceCatalogAPIServerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCatalogAPIServerList is a collection of items
+// DEPRECATED: will be removed in 4.6
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCatalogAPIServerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []ServiceCatalogAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go
new file mode 100644
index 0000000000..859965408b
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go
@@ -0,0 +1,54 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager
+// DEPRECATED: will be removed in 4.6
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCatalogControllerManager struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ServiceCatalogControllerManagerSpec `json:"spec"`
+ // +optional
+ Status ServiceCatalogControllerManagerStatus `json:"status"`
+}
+
+type ServiceCatalogControllerManagerSpec struct {
+ OperatorSpec `json:",inline"`
+}
+
+type ServiceCatalogControllerManagerStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCatalogControllerManagerList is a collection of items
+// DEPRECATED: will be removed in 4.6
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ServiceCatalogControllerManagerList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []ServiceCatalogControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go
new file mode 100644
index 0000000000..aa48b0c84f
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go
@@ -0,0 +1,80 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=storages,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/670
+// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=storage,operatorOrdering=01
+
+// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Storage struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec StorageSpec `json:"spec"`
+
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status StorageStatus `json:"status"`
+}
+
+// StorageDriverType indicates whether CSI migration should be enabled for drivers where it is optional.
+// +kubebuilder:validation:Enum="";LegacyDeprecatedInTreeDriver;CSIWithMigrationDriver
+type StorageDriverType string
+
+const (
+ LegacyDeprecatedInTreeDriver StorageDriverType = "LegacyDeprecatedInTreeDriver"
+ CSIWithMigrationDriver StorageDriverType = "CSIWithMigrationDriver"
+)
+
+// StorageSpec is the specification of the desired behavior of the cluster storage operator.
+type StorageSpec struct {
+ OperatorSpec `json:",inline"`
+
+ // VSphereStorageDriver indicates the storage driver to use on VSphere clusters.
+ // Once this field is set to CSIWithMigrationDriver, it can not be changed.
+ // If this is empty, the platform will choose a good default,
+ // which may change over time without notice.
+ // The current default is CSIWithMigrationDriver and may not be changed.
+ // DEPRECATED: This field will be removed in a future release.
+ // +kubebuilder:validation:XValidation:rule="self != \"LegacyDeprecatedInTreeDriver\"",message="VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver"
+ // +optional
+ VSphereStorageDriver StorageDriverType `json:"vsphereStorageDriver"`
+}
+
+// StorageStatus defines the observed status of the cluster storage operator.
+type StorageStatus struct {
+ OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageList contains a list of Storages.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type StorageList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []Storage `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..8b8ef76918
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go
@@ -0,0 +1,5046 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSCSIDriverConfigSpec) DeepCopyInto(out *AWSCSIDriverConfigSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCSIDriverConfigSpec.
+func (in *AWSCSIDriverConfigSpec) DeepCopy() *AWSCSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSCSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) {
+ *out = *in
+ out.ConnectionIdleTimeout = in.ConnectionIdleTimeout
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters.
+func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSClassicLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) {
+ *out = *in
+ if in.ClassicLoadBalancerParameters != nil {
+ in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters
+ *out = new(AWSClassicLoadBalancerParameters)
+ **out = **in
+ }
+ if in.NetworkLoadBalancerParameters != nil {
+ in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters
+ *out = new(AWSNetworkLoadBalancerParameters)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters.
+func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters.
+func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSNetworkLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AccessLogging) DeepCopyInto(out *AccessLogging) {
+ *out = *in
+ in.Destination.DeepCopyInto(&out.Destination)
+ in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders)
+ if in.HTTPCaptureCookies != nil {
+ in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies
+ *out = make([]IngressControllerCaptureHTTPCookie, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging.
+func (in *AccessLogging) DeepCopy() *AccessLogging {
+ if in == nil {
+ return nil
+ }
+ out := new(AccessLogging)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AddPage) DeepCopyInto(out *AddPage) {
+ *out = *in
+ if in.DisabledActions != nil {
+ in, out := &in.DisabledActions, &out.DisabledActions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage.
+func (in *AddPage) DeepCopy() *AddPage {
+ if in == nil {
+ return nil
+ }
+ out := new(AddPage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) {
+ *out = *in
+ if in.SimpleMacvlanConfig != nil {
+ in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig
+ *out = new(SimpleMacvlanConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition.
+func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition {
+ if in == nil {
+ return nil
+ }
+ out := new(AdditionalNetworkDefinition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Authentication) DeepCopyInto(out *Authentication) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
+func (in *Authentication) DeepCopy() *Authentication {
+ if in == nil {
+ return nil
+ }
+ out := new(Authentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Authentication) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Authentication, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList.
+func (in *AuthenticationList) DeepCopy() *AuthenticationList {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec.
+func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) {
+ *out = *in
+ out.OAuthAPIServer = in.OAuthAPIServer
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus.
+func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureCSIDriverConfigSpec) DeepCopyInto(out *AzureCSIDriverConfigSpec) {
+ *out = *in
+ if in.DiskEncryptionSet != nil {
+ in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet
+ *out = new(AzureDiskEncryptionSet)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCSIDriverConfigSpec.
+func (in *AzureCSIDriverConfigSpec) DeepCopy() *AzureCSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureCSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureDiskEncryptionSet) DeepCopyInto(out *AzureDiskEncryptionSet) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskEncryptionSet.
+func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureDiskEncryptionSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSCSIDriverConfigSpec)
+ **out = **in
+ }
+ if in.Azure != nil {
+ in, out := &in.Azure, &out.Azure
+ *out = new(AzureCSIDriverConfigSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new(GCPCSIDriverConfigSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IBMCloud != nil {
+ in, out := &in.IBMCloud, &out.IBMCloud
+ *out = new(IBMCloudCSIDriverConfigSpec)
+ **out = **in
+ }
+ if in.VSphere != nil {
+ in, out := &in.VSphere, &out.VSphere
+ *out = new(VSphereCSIDriverConfigSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverConfigSpec.
+func (in *CSIDriverConfigSpec) DeepCopy() *CSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController.
+func (in *CSISnapshotController) DeepCopy() *CSISnapshotController {
+ if in == nil {
+ return nil
+ }
+ out := new(CSISnapshotController)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CSISnapshotController) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CSISnapshotController, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList.
+func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList {
+ if in == nil {
+ return nil
+ }
+ out := new(CSISnapshotControllerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec.
+func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CSISnapshotControllerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus.
+func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CSISnapshotControllerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientTLS) DeepCopyInto(out *ClientTLS) {
+ *out = *in
+ out.ClientCA = in.ClientCA
+ if in.AllowedSubjectPatterns != nil {
+ in, out := &in.AllowedSubjectPatterns, &out.AllowedSubjectPatterns
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientTLS.
+func (in *ClientTLS) DeepCopy() *ClientTLS {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientTLS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudCredential) DeepCopyInto(out *CloudCredential) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential.
+func (in *CloudCredential) DeepCopy() *CloudCredential {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudCredential)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudCredential) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CloudCredential, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList.
+func (in *CloudCredentialList) DeepCopy() *CloudCredentialList {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudCredentialList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CloudCredentialList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec.
+func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudCredentialSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus.
+func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CloudCredentialStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver.
+func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCSIDriver)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterCSIDriver, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList.
+func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCSIDriverList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ in.DriverConfig.DeepCopyInto(&out.DriverConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec.
+func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCSIDriverSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus.
+func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterCSIDriverStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry.
+func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterNetworkEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Config, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec.
+func (in *ConfigSpec) DeepCopy() *ConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus.
+func (in *ConfigStatus) DeepCopy() *ConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Console) DeepCopyInto(out *Console) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console.
+func (in *Console) DeepCopy() *Console {
+ if in == nil {
+ return nil
+ }
+ out := new(Console)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Console) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) {
+ *out = *in
+ out.Secret = in.Secret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute.
+func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleConfigRoute)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) {
+ *out = *in
+ out.CustomLogoFile = in.CustomLogoFile
+ in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog)
+ in.ProjectAccess.DeepCopyInto(&out.ProjectAccess)
+ in.QuickStarts.DeepCopyInto(&out.QuickStarts)
+ in.AddPage.DeepCopyInto(&out.AddPage)
+ if in.Perspectives != nil {
+ in, out := &in.Perspectives, &out.Perspectives
+ *out = make([]Perspective, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization.
+func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleCustomization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleList) DeepCopyInto(out *ConsoleList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Console, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList.
+func (in *ConsoleList) DeepCopy() *ConsoleList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConsoleList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) {
+ *out = *in
+ if in.Statuspage != nil {
+ in, out := &in.Statuspage, &out.Statuspage
+ *out = new(StatuspageProvider)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders.
+func (in *ConsoleProviders) DeepCopy() *ConsoleProviders {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleProviders)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ in.Customization.DeepCopyInto(&out.Customization)
+ in.Providers.DeepCopyInto(&out.Providers)
+ out.Route = in.Route
+ if in.Plugins != nil {
+ in, out := &in.Plugins, &out.Plugins
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec.
+func (in *ConsoleSpec) DeepCopy() *ConsoleSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus.
+func (in *ConsoleStatus) DeepCopy() *ConsoleStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ConsoleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters.
+func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerLoggingDestinationParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNS) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSCache) DeepCopyInto(out *DNSCache) {
+ *out = *in
+ out.PositiveTTL = in.PositiveTTL
+ out.NegativeTTL = in.NegativeTTL
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSCache.
+func (in *DNSCache) DeepCopy() *DNSCache {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSCache)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSList) DeepCopyInto(out *DNSList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DNS, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList.
+func (in *DNSList) DeepCopy() *DNSList {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DNSList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement.
+func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSNodePlacement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSOverTLSConfig) DeepCopyInto(out *DNSOverTLSConfig) {
+ *out = *in
+ out.CABundle = in.CABundle
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSOverTLSConfig.
+func (in *DNSOverTLSConfig) DeepCopy() *DNSOverTLSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSOverTLSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) {
+ *out = *in
+ if in.Servers != nil {
+ in, out := &in.Servers, &out.Servers
+ *out = make([]Server, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.UpstreamResolvers.DeepCopyInto(&out.UpstreamResolvers)
+ in.NodePlacement.DeepCopyInto(&out.NodePlacement)
+ out.Cache = in.Cache
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec.
+func (in *DNSSpec) DeepCopy() *DNSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSStatus) DeepCopyInto(out *DNSStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus.
+func (in *DNSStatus) DeepCopy() *DNSStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DNSTransportConfig) DeepCopyInto(out *DNSTransportConfig) {
+ *out = *in
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = new(DNSOverTLSConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSTransportConfig.
+func (in *DNSTransportConfig) DeepCopy() *DNSTransportConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DNSTransportConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) {
+ *out = *in
+ if in.OpenShiftSDNConfig != nil {
+ in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig
+ *out = new(OpenShiftSDNConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OVNKubernetesConfig != nil {
+ in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig
+ *out = new(OVNKubernetesConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition.
+func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition {
+ if in == nil {
+ return nil
+ }
+ out := new(DefaultNetworkDefinition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) {
+ *out = *in
+ in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta)
+ if in.Subcategories != nil {
+ in, out := &in.Subcategories, &out.Subcategories
+ *out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory.
+func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory {
+ if in == nil {
+ return nil
+ }
+ out := new(DeveloperConsoleCatalogCategory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) {
+ *out = *in
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta.
+func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta {
+ if in == nil {
+ return nil
+ }
+ out := new(DeveloperConsoleCatalogCategoryMeta)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) {
+ *out = *in
+ if in.Categories != nil {
+ in, out := &in.Categories, &out.Categories
+ *out = make([]DeveloperConsoleCatalogCategory, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Types.DeepCopyInto(&out.Types)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization.
+func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization {
+ if in == nil {
+ return nil
+ }
+ out := new(DeveloperConsoleCatalogCustomization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeveloperConsoleCatalogTypes) DeepCopyInto(out *DeveloperConsoleCatalogTypes) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new([]string)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ }
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = new([]string)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogTypes.
+func (in *DeveloperConsoleCatalogTypes) DeepCopy() *DeveloperConsoleCatalogTypes {
+ if in == nil {
+ return nil
+ }
+ out := new(DeveloperConsoleCatalogTypes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressIPConfig) DeepCopyInto(out *EgressIPConfig) {
+ *out = *in
+ if in.ReachabilityTotalTimeoutSeconds != nil {
+ in, out := &in.ReachabilityTotalTimeoutSeconds, &out.ReachabilityTotalTimeoutSeconds
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressIPConfig.
+func (in *EgressIPConfig) DeepCopy() *EgressIPConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressIPConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) {
+ *out = *in
+ if in.LoadBalancer != nil {
+ in, out := &in.LoadBalancer, &out.LoadBalancer
+ *out = new(LoadBalancerStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HostNetwork != nil {
+ in, out := &in.HostNetwork, &out.HostNetwork
+ *out = new(HostNetworkStrategy)
+ **out = **in
+ }
+ if in.Private != nil {
+ in, out := &in.Private, &out.Private
+ *out = new(PrivateStrategy)
+ **out = **in
+ }
+ if in.NodePort != nil {
+ in, out := &in.NodePort, &out.NodePort
+ *out = new(NodePortStrategy)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy.
+func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointPublishingStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Etcd) DeepCopyInto(out *Etcd) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd.
+func (in *Etcd) DeepCopy() *Etcd {
+ if in == nil {
+ return nil
+ }
+ out := new(Etcd)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Etcd) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdList) DeepCopyInto(out *EtcdList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Etcd, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList.
+func (in *EtcdList) DeepCopy() *EtcdList {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EtcdList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) {
+ *out = *in
+ in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec.
+func (in *EtcdSpec) DeepCopy() *EtcdSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) {
+ *out = *in
+ in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus.
+func (in *EtcdStatus) DeepCopy() *EtcdStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) {
+ *out = *in
+ if in.NetFlow != nil {
+ in, out := &in.NetFlow, &out.NetFlow
+ *out = new(NetFlowConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SFlow != nil {
+ in, out := &in.SFlow, &out.SFlow
+ *out = new(SFlowConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IPFIX != nil {
+ in, out := &in.IPFIX, &out.IPFIX
+ *out = new(IPFIXConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows.
+func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows {
+ if in == nil {
+ return nil
+ }
+ out := new(ExportNetworkFlows)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FeaturesMigration) DeepCopyInto(out *FeaturesMigration) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesMigration.
+func (in *FeaturesMigration) DeepCopy() *FeaturesMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(FeaturesMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) {
+ *out = *in
+ if in.Upstreams != nil {
+ in, out := &in.Upstreams, &out.Upstreams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.TransportConfig.DeepCopyInto(&out.TransportConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin.
+func (in *ForwardPlugin) DeepCopy() *ForwardPlugin {
+ if in == nil {
+ return nil
+ }
+ out := new(ForwardPlugin)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPCSIDriverConfigSpec) DeepCopyInto(out *GCPCSIDriverConfigSpec) {
+ *out = *in
+ if in.KMSKey != nil {
+ in, out := &in.KMSKey, &out.KMSKey
+ *out = new(GCPKMSKeyReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPCSIDriverConfigSpec.
+func (in *GCPCSIDriverConfigSpec) DeepCopy() *GCPCSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPCSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference.
+func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPKMSKeyReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters.
+func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(GCPLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GatewayConfig) DeepCopyInto(out *GatewayConfig) {
+ *out = *in
+ out.IPv4 = in.IPv4
+ out.IPv6 = in.IPv6
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayConfig.
+func (in *GatewayConfig) DeepCopy() *GatewayConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GatewayConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GatherStatus) DeepCopyInto(out *GatherStatus) {
+ *out = *in
+ in.LastGatherTime.DeepCopyInto(&out.LastGatherTime)
+ out.LastGatherDuration = in.LastGatherDuration
+ if in.Gatherers != nil {
+ in, out := &in.Gatherers, &out.Gatherers
+ *out = make([]GathererStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherStatus.
+func (in *GatherStatus) DeepCopy() *GatherStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GatherStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GathererStatus) DeepCopyInto(out *GathererStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.LastGatherDuration = in.LastGatherDuration
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererStatus.
+func (in *GathererStatus) DeepCopy() *GathererStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GathererStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus.
+func (in *GenerationStatus) DeepCopy() *GenerationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GenerationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPCompressionPolicy) DeepCopyInto(out *HTTPCompressionPolicy) {
+ *out = *in
+ if in.MimeTypes != nil {
+ in, out := &in.MimeTypes, &out.MimeTypes
+ *out = make([]CompressionMIMEType, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCompressionPolicy.
+func (in *HTTPCompressionPolicy) DeepCopy() *HTTPCompressionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPCompressionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthCheck) DeepCopyInto(out *HealthCheck) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheck.
+func (in *HealthCheck) DeepCopy() *HealthCheck {
+ if in == nil {
+ return nil
+ }
+ out := new(HealthCheck)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy.
+func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(HostNetworkStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) {
+ *out = *in
+ if in.HybridClusterNetwork != nil {
+ in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.HybridOverlayVXLANPort != nil {
+ in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig.
+func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(HybridOverlayConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMCloudCSIDriverConfigSpec) DeepCopyInto(out *IBMCloudCSIDriverConfigSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudCSIDriverConfigSpec.
+func (in *IBMCloudCSIDriverConfigSpec) DeepCopy() *IBMCloudCSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMCloudCSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IBMLoadBalancerParameters) DeepCopyInto(out *IBMLoadBalancerParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMLoadBalancerParameters.
+func (in *IBMLoadBalancerParameters) DeepCopy() *IBMLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(IBMLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) {
+ *out = *in
+ if in.StaticIPAMConfig != nil {
+ in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig
+ *out = new(StaticIPAMConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig.
+func (in *IPAMConfig) DeepCopy() *IPAMConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) {
+ *out = *in
+ if in.Collectors != nil {
+ in, out := &in.Collectors, &out.Collectors
+ *out = make([]IPPort, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig.
+func (in *IPFIXConfig) DeepCopy() *IPFIXConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPFIXConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig.
+func (in *IPsecConfig) DeepCopy() *IPsecConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPsecConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv4GatewayConfig) DeepCopyInto(out *IPv4GatewayConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4GatewayConfig.
+func (in *IPv4GatewayConfig) DeepCopy() *IPv4GatewayConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv4GatewayConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv4OVNKubernetesConfig) DeepCopyInto(out *IPv4OVNKubernetesConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4OVNKubernetesConfig.
+func (in *IPv4OVNKubernetesConfig) DeepCopy() *IPv4OVNKubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv4OVNKubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6GatewayConfig) DeepCopyInto(out *IPv6GatewayConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6GatewayConfig.
+func (in *IPv6GatewayConfig) DeepCopy() *IPv6GatewayConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv6GatewayConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6OVNKubernetesConfig) DeepCopyInto(out *IPv6OVNKubernetesConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6OVNKubernetesConfig.
+func (in *IPv6OVNKubernetesConfig) DeepCopy() *IPv6OVNKubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv6OVNKubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressController) DeepCopyInto(out *IngressController) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController.
+func (in *IngressController) DeepCopy() *IngressController {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressController)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressController) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) {
+ *out = *in
+ out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie.
+func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerCaptureHTTPCookie)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion.
+func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerCaptureHTTPCookieUnion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader.
+func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerCaptureHTTPHeader)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) {
+ *out = *in
+ if in.Request != nil {
+ in, out := &in.Request, &out.Request
+ *out = make([]IngressControllerCaptureHTTPHeader, len(*in))
+ copy(*out, *in)
+ }
+ if in.Response != nil {
+ in, out := &in.Response, &out.Response
+ *out = make([]IngressControllerCaptureHTTPHeader, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders.
+func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerCaptureHTTPHeaders)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPHeader) DeepCopyInto(out *IngressControllerHTTPHeader) {
+ *out = *in
+ in.Action.DeepCopyInto(&out.Action)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeader.
+func (in *IngressControllerHTTPHeader) DeepCopy() *IngressControllerHTTPHeader {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerHTTPHeader)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPHeaderActionUnion) DeepCopyInto(out *IngressControllerHTTPHeaderActionUnion) {
+ *out = *in
+ if in.Set != nil {
+ in, out := &in.Set, &out.Set
+ *out = new(IngressControllerSetHTTPHeader)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActionUnion.
+func (in *IngressControllerHTTPHeaderActionUnion) DeepCopy() *IngressControllerHTTPHeaderActionUnion {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerHTTPHeaderActionUnion)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPHeaderActions) DeepCopyInto(out *IngressControllerHTTPHeaderActions) {
+ *out = *in
+ if in.Response != nil {
+ in, out := &in.Response, &out.Response
+ *out = make([]IngressControllerHTTPHeader, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Request != nil {
+ in, out := &in.Request, &out.Request
+ *out = make([]IngressControllerHTTPHeader, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActions.
+func (in *IngressControllerHTTPHeaderActions) DeepCopy() *IngressControllerHTTPHeaderActions {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerHTTPHeaderActions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) {
+ *out = *in
+ out.UniqueId = in.UniqueId
+ if in.HeaderNameCaseAdjustments != nil {
+ in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments
+ *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in))
+ copy(*out, *in)
+ }
+ in.Actions.DeepCopyInto(&out.Actions)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders.
+func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerHTTPHeaders)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy.
+func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerHTTPUniqueIdHeaderPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]IngressController, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList.
+func (in *IngressControllerList) DeepCopy() *IngressControllerList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressControllerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) {
+ *out = *in
+ if in.Access != nil {
+ in, out := &in.Access, &out.Access
+ *out = new(AccessLogging)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging.
+func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerLogging)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerSetHTTPHeader) DeepCopyInto(out *IngressControllerSetHTTPHeader) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSetHTTPHeader.
+func (in *IngressControllerSetHTTPHeader) DeepCopy() *IngressControllerSetHTTPHeader {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerSetHTTPHeader)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) {
+ *out = *in
+ out.HttpErrorCodePages = in.HttpErrorCodePages
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.EndpointPublishingStrategy != nil {
+ in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy
+ *out = new(EndpointPublishingStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.DefaultCertificate != nil {
+ in, out := &in.DefaultCertificate, &out.DefaultCertificate
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RouteSelector != nil {
+ in, out := &in.RouteSelector, &out.RouteSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NodePlacement != nil {
+ in, out := &in.NodePlacement, &out.NodePlacement
+ *out = new(NodePlacement)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TLSSecurityProfile != nil {
+ in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile
+ *out = new(configv1.TLSSecurityProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ in.ClientTLS.DeepCopyInto(&out.ClientTLS)
+ if in.RouteAdmission != nil {
+ in, out := &in.RouteAdmission, &out.RouteAdmission
+ *out = new(RouteAdmissionPolicy)
+ **out = **in
+ }
+ if in.Logging != nil {
+ in, out := &in.Logging, &out.Logging
+ *out = new(IngressControllerLogging)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.HTTPHeaders != nil {
+ in, out := &in.HTTPHeaders, &out.HTTPHeaders
+ *out = new(IngressControllerHTTPHeaders)
+ (*in).DeepCopyInto(*out)
+ }
+ in.TuningOptions.DeepCopyInto(&out.TuningOptions)
+ in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides)
+ in.HTTPCompression.DeepCopyInto(&out.HTTPCompression)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec.
+func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) {
+ *out = *in
+ if in.EndpointPublishingStrategy != nil {
+ in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy
+ *out = new(EndpointPublishingStrategy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.TLSProfile != nil {
+ in, out := &in.TLSProfile, &out.TLSProfile
+ *out = new(configv1.TLSProfileSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RouteSelector != nil {
+ in, out := &in.RouteSelector, &out.RouteSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus.
+func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) {
+ *out = *in
+ if in.ClientTimeout != nil {
+ in, out := &in.ClientTimeout, &out.ClientTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ClientFinTimeout != nil {
+ in, out := &in.ClientFinTimeout, &out.ClientFinTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ServerTimeout != nil {
+ in, out := &in.ServerTimeout, &out.ServerTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ServerFinTimeout != nil {
+ in, out := &in.ServerFinTimeout, &out.ServerFinTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.TunnelTimeout != nil {
+ in, out := &in.TunnelTimeout, &out.TunnelTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.ConnectTimeout != nil {
+ in, out := &in.ConnectTimeout, &out.ConnectTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.TLSInspectDelay != nil {
+ in, out := &in.TLSInspectDelay, &out.TLSInspectDelay
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.HealthCheckInterval != nil {
+ in, out := &in.HealthCheckInterval, &out.HealthCheckInterval
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ out.ReloadInterval = in.ReloadInterval
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions.
+func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressControllerTuningOptions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsOperator) DeepCopyInto(out *InsightsOperator) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperator.
+func (in *InsightsOperator) DeepCopy() *InsightsOperator {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsOperator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InsightsOperator) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsOperatorList) DeepCopyInto(out *InsightsOperatorList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]InsightsOperator, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorList.
+func (in *InsightsOperatorList) DeepCopy() *InsightsOperatorList {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsOperatorList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InsightsOperatorList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsOperatorSpec) DeepCopyInto(out *InsightsOperatorSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorSpec.
+func (in *InsightsOperatorSpec) DeepCopy() *InsightsOperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsOperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsOperatorStatus) DeepCopyInto(out *InsightsOperatorStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ in.GatherStatus.DeepCopyInto(&out.GatherStatus)
+ in.InsightsReport.DeepCopyInto(&out.InsightsReport)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsOperatorStatus.
+func (in *InsightsOperatorStatus) DeepCopy() *InsightsOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InsightsReport) DeepCopyInto(out *InsightsReport) {
+ *out = *in
+ in.DownloadedAt.DeepCopyInto(&out.DownloadedAt)
+ if in.HealthChecks != nil {
+ in, out := &in.HealthChecks, &out.HealthChecks
+ *out = make([]HealthCheck, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsReport.
+func (in *InsightsReport) DeepCopy() *InsightsReport {
+ if in == nil {
+ return nil
+ }
+ out := new(InsightsReport)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer.
+func (in *KubeAPIServer) DeepCopy() *KubeAPIServer {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeAPIServer) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KubeAPIServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList.
+func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeAPIServerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) {
+ *out = *in
+ in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec.
+func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) {
+ *out = *in
+ in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+ if in.ServiceAccountIssuers != nil {
+ in, out := &in.ServiceAccountIssuers, &out.ServiceAccountIssuers
+ *out = make([]ServiceAccountIssuerStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus.
+func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeAPIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager.
+func (in *KubeControllerManager) DeepCopy() *KubeControllerManager {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManager)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeControllerManager) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KubeControllerManager, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList.
+func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) {
+ *out = *in
+ in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec.
+func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) {
+ *out = *in
+ in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus.
+func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeControllerManagerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler.
+func (in *KubeScheduler) DeepCopy() *KubeScheduler {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeScheduler)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeScheduler) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KubeScheduler, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList.
+func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeSchedulerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) {
+ *out = *in
+ in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec.
+func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) {
+ *out = *in
+ in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus.
+func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeSchedulerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator.
+func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeStorageVersionMigrator)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]KubeStorageVersionMigrator, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList.
+func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeStorageVersionMigratorList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec.
+func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeStorageVersionMigratorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus.
+func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(KubeStorageVersionMigratorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) {
+ *out = *in
+ if in.AllowedSourceRanges != nil {
+ in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ if in.ProviderParameters != nil {
+ in, out := &in.ProviderParameters, &out.ProviderParameters
+ *out = new(ProviderLoadBalancerParameters)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy.
+func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancerStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) {
+ *out = *in
+ if in.Syslog != nil {
+ in, out := &in.Syslog, &out.Syslog
+ *out = new(SyslogLoggingDestinationParameters)
+ **out = **in
+ }
+ if in.Container != nil {
+ in, out := &in.Container, &out.Container
+ *out = new(ContainerLoggingDestinationParameters)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination.
+func (in *LoggingDestination) DeepCopy() *LoggingDestination {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingDestination)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MTUMigration) DeepCopyInto(out *MTUMigration) {
+ *out = *in
+ if in.Network != nil {
+ in, out := &in.Network, &out.Network
+ *out = new(MTUMigrationValues)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Machine != nil {
+ in, out := &in.Machine, &out.Machine
+ *out = new(MTUMigrationValues)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration.
+func (in *MTUMigration) DeepCopy() *MTUMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(MTUMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) {
+ *out = *in
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues.
+func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues {
+ if in == nil {
+ return nil
+ }
+ out := new(MTUMigrationValues)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineConfiguration) DeepCopyInto(out *MachineConfiguration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfiguration.
+func (in *MachineConfiguration) DeepCopy() *MachineConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineConfiguration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineConfigurationList) DeepCopyInto(out *MachineConfigurationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MachineConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationList.
+func (in *MachineConfigurationList) DeepCopy() *MachineConfigurationList {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineConfigurationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineConfigurationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineConfigurationSpec) DeepCopyInto(out *MachineConfigurationSpec) {
+ *out = *in
+ in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec)
+ in.ManagedBootImages.DeepCopyInto(&out.ManagedBootImages)
+ in.NodeDisruptionPolicy.DeepCopyInto(&out.NodeDisruptionPolicy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationSpec.
+func (in *MachineConfigurationSpec) DeepCopy() *MachineConfigurationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineConfigurationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigurationStatus.
+func (in *MachineConfigurationStatus) DeepCopy() *MachineConfigurationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineConfigurationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineManager) DeepCopyInto(out *MachineManager) {
+ *out = *in
+ in.Selection.DeepCopyInto(&out.Selection)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManager.
+func (in *MachineManager) DeepCopy() *MachineManager {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineManager)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineManagerSelector) DeepCopyInto(out *MachineManagerSelector) {
+ *out = *in
+ if in.Partial != nil {
+ in, out := &in.Partial, &out.Partial
+ *out = new(PartialSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineManagerSelector.
+func (in *MachineManagerSelector) DeepCopy() *MachineManagerSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(MachineManagerSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedBootImages) DeepCopyInto(out *ManagedBootImages) {
+ *out = *in
+ if in.MachineManagers != nil {
+ in, out := &in.MachineManagers, &out.MachineManagers
+ *out = make([]MachineManager, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedBootImages.
+func (in *ManagedBootImages) DeepCopy() *ManagedBootImages {
+ if in == nil {
+ return nil
+ }
+ out := new(ManagedBootImages)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource.
+func (in *MyOperatorResource) DeepCopy() *MyOperatorResource {
+ if in == nil {
+ return nil
+ }
+ out := new(MyOperatorResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec.
+func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MyOperatorResourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus.
+func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MyOperatorResourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) {
+ *out = *in
+ if in.Collectors != nil {
+ in, out := &in.Collectors, &out.Collectors
+ *out = make([]IPPort, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig.
+func (in *NetFlowConfig) DeepCopy() *NetFlowConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NetFlowConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Network) DeepCopyInto(out *Network) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
+func (in *Network) DeepCopy() *Network {
+ if in == nil {
+ return nil
+ }
+ out := new(Network)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Network) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkList) DeepCopyInto(out *NetworkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Network, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList.
+func (in *NetworkList) DeepCopy() *NetworkList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) {
+ *out = *in
+ if in.MTU != nil {
+ in, out := &in.MTU, &out.MTU
+ *out = new(MTUMigration)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Features != nil {
+ in, out := &in.Features, &out.Features
+ *out = new(FeaturesMigration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration.
+func (in *NetworkMigration) DeepCopy() *NetworkMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ if in.ClusterNetwork != nil {
+ in, out := &in.ClusterNetwork, &out.ClusterNetwork
+ *out = make([]ClusterNetworkEntry, len(*in))
+ copy(*out, *in)
+ }
+ if in.ServiceNetwork != nil {
+ in, out := &in.ServiceNetwork, &out.ServiceNetwork
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork)
+ if in.AdditionalNetworks != nil {
+ in, out := &in.AdditionalNetworks, &out.AdditionalNetworks
+ *out = make([]AdditionalNetworkDefinition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DisableMultiNetwork != nil {
+ in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork
+ *out = new(bool)
+ **out = **in
+ }
+ if in.UseMultiNetworkPolicy != nil {
+ in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy
+ *out = new(bool)
+ **out = **in
+ }
+ if in.DeployKubeProxy != nil {
+ in, out := &in.DeployKubeProxy, &out.DeployKubeProxy
+ *out = new(bool)
+ **out = **in
+ }
+ if in.KubeProxyConfig != nil {
+ in, out := &in.KubeProxyConfig, &out.KubeProxyConfig
+ *out = new(ProxyConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ExportNetworkFlows != nil {
+ in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows
+ *out = new(ExportNetworkFlows)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Migration != nil {
+ in, out := &in.Migration, &out.Migration
+ *out = new(NetworkMigration)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus.
+func (in *NetworkStatus) DeepCopy() *NetworkStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyClusterStatus) DeepCopyInto(out *NodeDisruptionPolicyClusterStatus) {
+ *out = *in
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = make([]NodeDisruptionPolicyStatusFile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Units != nil {
+ in, out := &in.Units, &out.Units
+ *out = make([]NodeDisruptionPolicyStatusUnit, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.SSHKey.DeepCopyInto(&out.SSHKey)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyClusterStatus.
+func (in *NodeDisruptionPolicyClusterStatus) DeepCopy() *NodeDisruptionPolicyClusterStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyClusterStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyConfig) DeepCopyInto(out *NodeDisruptionPolicyConfig) {
+ *out = *in
+ if in.Files != nil {
+ in, out := &in.Files, &out.Files
+ *out = make([]NodeDisruptionPolicySpecFile, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Units != nil {
+ in, out := &in.Units, &out.Units
+ *out = make([]NodeDisruptionPolicySpecUnit, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.SSHKey.DeepCopyInto(&out.SSHKey)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyConfig.
+func (in *NodeDisruptionPolicyConfig) DeepCopy() *NodeDisruptionPolicyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicySpecAction) DeepCopyInto(out *NodeDisruptionPolicySpecAction) {
+ *out = *in
+ if in.Reload != nil {
+ in, out := &in.Reload, &out.Reload
+ *out = new(ReloadService)
+ **out = **in
+ }
+ if in.Restart != nil {
+ in, out := &in.Restart, &out.Restart
+ *out = new(RestartService)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecAction.
+func (in *NodeDisruptionPolicySpecAction) DeepCopy() *NodeDisruptionPolicySpecAction {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicySpecAction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicySpecFile) DeepCopyInto(out *NodeDisruptionPolicySpecFile) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicySpecAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecFile.
+func (in *NodeDisruptionPolicySpecFile) DeepCopy() *NodeDisruptionPolicySpecFile {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicySpecFile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicySpecSSHKey) DeepCopyInto(out *NodeDisruptionPolicySpecSSHKey) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicySpecAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecSSHKey.
+func (in *NodeDisruptionPolicySpecSSHKey) DeepCopy() *NodeDisruptionPolicySpecSSHKey {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicySpecSSHKey)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicySpecUnit) DeepCopyInto(out *NodeDisruptionPolicySpecUnit) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicySpecAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicySpecUnit.
+func (in *NodeDisruptionPolicySpecUnit) DeepCopy() *NodeDisruptionPolicySpecUnit {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicySpecUnit)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyStatus) DeepCopyInto(out *NodeDisruptionPolicyStatus) {
+ *out = *in
+ in.ClusterPolicies.DeepCopyInto(&out.ClusterPolicies)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatus.
+func (in *NodeDisruptionPolicyStatus) DeepCopy() *NodeDisruptionPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyStatusAction) DeepCopyInto(out *NodeDisruptionPolicyStatusAction) {
+ *out = *in
+ if in.Reload != nil {
+ in, out := &in.Reload, &out.Reload
+ *out = new(ReloadService)
+ **out = **in
+ }
+ if in.Restart != nil {
+ in, out := &in.Restart, &out.Restart
+ *out = new(RestartService)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusAction.
+func (in *NodeDisruptionPolicyStatusAction) DeepCopy() *NodeDisruptionPolicyStatusAction {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyStatusAction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyStatusFile) DeepCopyInto(out *NodeDisruptionPolicyStatusFile) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicyStatusAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusFile.
+func (in *NodeDisruptionPolicyStatusFile) DeepCopy() *NodeDisruptionPolicyStatusFile {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyStatusFile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopyInto(out *NodeDisruptionPolicyStatusSSHKey) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicyStatusAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusSSHKey.
+func (in *NodeDisruptionPolicyStatusSSHKey) DeepCopy() *NodeDisruptionPolicyStatusSSHKey {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyStatusSSHKey)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDisruptionPolicyStatusUnit) DeepCopyInto(out *NodeDisruptionPolicyStatusUnit) {
+ *out = *in
+ if in.Actions != nil {
+ in, out := &in.Actions, &out.Actions
+ *out = make([]NodeDisruptionPolicyStatusAction, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDisruptionPolicyStatusUnit.
+func (in *NodeDisruptionPolicyStatusUnit) DeepCopy() *NodeDisruptionPolicyStatusUnit {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeDisruptionPolicyStatusUnit)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePlacement) DeepCopyInto(out *NodePlacement) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]corev1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement.
+func (in *NodePlacement) DeepCopy() *NodePlacement {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePlacement)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy.
+func (in *NodePortStrategy) DeepCopy() *NodePortStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(NodePortStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+ *out = *in
+ if in.LastFailedTime != nil {
+ in, out := &in.LastFailedTime, &out.LastFailedTime
+ *out = (*in).DeepCopy()
+ }
+ if in.LastFailedRevisionErrors != nil {
+ in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus.
+func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthAPIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) {
+ *out = *in
+ if in.MTU != nil {
+ in, out := &in.MTU, &out.MTU
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.GenevePort != nil {
+ in, out := &in.GenevePort, &out.GenevePort
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.HybridOverlayConfig != nil {
+ in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig
+ *out = new(HybridOverlayConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IPsecConfig != nil {
+ in, out := &in.IPsecConfig, &out.IPsecConfig
+ *out = new(IPsecConfig)
+ **out = **in
+ }
+ if in.PolicyAuditConfig != nil {
+ in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig
+ *out = new(PolicyAuditConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GatewayConfig != nil {
+ in, out := &in.GatewayConfig, &out.GatewayConfig
+ *out = new(GatewayConfig)
+ **out = **in
+ }
+ in.EgressIPConfig.DeepCopyInto(&out.EgressIPConfig)
+ if in.IPv4 != nil {
+ in, out := &in.IPv4, &out.IPv4
+ *out = new(IPv4OVNKubernetesConfig)
+ **out = **in
+ }
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(IPv6OVNKubernetesConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig.
+func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OVNKubernetesConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer.
+func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftAPIServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OpenShiftAPIServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList.
+func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftAPIServerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec.
+func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftAPIServerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus.
+func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftAPIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager.
+func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftControllerManager)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OpenShiftControllerManager, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList.
+func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftControllerManagerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec.
+func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftControllerManagerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus.
+func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftControllerManagerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) {
+ *out = *in
+ if in.VXLANPort != nil {
+ in, out := &in.VXLANPort, &out.VXLANPort
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.MTU != nil {
+ in, out := &in.MTU, &out.MTU
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.UseExternalOpenvswitch != nil {
+ in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch
+ *out = new(bool)
+ **out = **in
+ }
+ if in.EnableUnidling != nil {
+ in, out := &in.EnableUnidling, &out.EnableUnidling
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig.
+func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenShiftSDNConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition.
+func (in *OperatorCondition) DeepCopy() *OperatorCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) {
+ *out = *in
+ in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides)
+ in.ObservedConfig.DeepCopyInto(&out.ObservedConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec.
+func (in *OperatorSpec) DeepCopy() *OperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Generations != nil {
+ in, out := &in.Generations, &out.Generations
+ *out = make([]GenerationStatus, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus.
+func (in *OperatorStatus) DeepCopy() *OperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialSelector) DeepCopyInto(out *PartialSelector) {
+ *out = *in
+ if in.MachineResourceSelector != nil {
+ in, out := &in.MachineResourceSelector, &out.MachineResourceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialSelector.
+func (in *PartialSelector) DeepCopy() *PartialSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(PartialSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Perspective) DeepCopyInto(out *Perspective) {
+ *out = *in
+ in.Visibility.DeepCopyInto(&out.Visibility)
+ if in.PinnedResources != nil {
+ in, out := &in.PinnedResources, &out.PinnedResources
+ *out = new([]PinnedResourceReference)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]PinnedResourceReference, len(*in))
+ copy(*out, *in)
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Perspective.
+func (in *Perspective) DeepCopy() *Perspective {
+ if in == nil {
+ return nil
+ }
+ out := new(Perspective)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PerspectiveVisibility) DeepCopyInto(out *PerspectiveVisibility) {
+ *out = *in
+ if in.AccessReview != nil {
+ in, out := &in.AccessReview, &out.AccessReview
+ *out = new(ResourceAttributesAccessReview)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerspectiveVisibility.
+func (in *PerspectiveVisibility) DeepCopy() *PerspectiveVisibility {
+ if in == nil {
+ return nil
+ }
+ out := new(PerspectiveVisibility)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PinnedResourceReference) DeepCopyInto(out *PinnedResourceReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PinnedResourceReference.
+func (in *PinnedResourceReference) DeepCopy() *PinnedResourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(PinnedResourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) {
+ *out = *in
+ if in.RateLimit != nil {
+ in, out := &in.RateLimit, &out.RateLimit
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.MaxFileSize != nil {
+ in, out := &in.MaxFileSize, &out.MaxFileSize
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.MaxLogFiles != nil {
+ in, out := &in.MaxLogFiles, &out.MaxLogFiles
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig.
+func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyAuditConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy.
+func (in *PrivateStrategy) DeepCopy() *PrivateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(PrivateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) {
+ *out = *in
+ if in.AvailableClusterRoles != nil {
+ in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess.
+func (in *ProjectAccess) DeepCopy() *ProjectAccess {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectAccess)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSLoadBalancerParameters)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GCP != nil {
+ in, out := &in.GCP, &out.GCP
+ *out = new(GCPLoadBalancerParameters)
+ **out = **in
+ }
+ if in.IBM != nil {
+ in, out := &in.IBM, &out.IBM
+ *out = new(IBMLoadBalancerParameters)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters.
+func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(ProviderLoadBalancerParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) {
+ {
+ in := &in
+ *out = make(ProxyArgumentList, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList.
+func (in ProxyArgumentList) DeepCopy() ProxyArgumentList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyArgumentList)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
+ *out = *in
+ if in.ProxyArguments != nil {
+ in, out := &in.ProxyArguments, &out.ProxyArguments
+ *out = make(map[string]ProxyArgumentList, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make(ProxyArgumentList, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
+func (in *ProxyConfig) DeepCopy() *ProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuickStarts) DeepCopyInto(out *QuickStarts) {
+ *out = *in
+ if in.Disabled != nil {
+ in, out := &in.Disabled, &out.Disabled
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts.
+func (in *QuickStarts) DeepCopy() *QuickStarts {
+ if in == nil {
+ return nil
+ }
+ out := new(QuickStarts)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReloadService) DeepCopyInto(out *ReloadService) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReloadService.
+func (in *ReloadService) DeepCopy() *ReloadService {
+ if in == nil {
+ return nil
+ }
+ out := new(ReloadService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceAttributesAccessReview) DeepCopyInto(out *ResourceAttributesAccessReview) {
+ *out = *in
+ if in.Required != nil {
+ in, out := &in.Required, &out.Required
+ *out = make([]authorizationv1.ResourceAttributes, len(*in))
+ copy(*out, *in)
+ }
+ if in.Missing != nil {
+ in, out := &in.Missing, &out.Missing
+ *out = make([]authorizationv1.ResourceAttributes, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributesAccessReview.
+func (in *ResourceAttributesAccessReview) DeepCopy() *ResourceAttributesAccessReview {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceAttributesAccessReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RestartService) DeepCopyInto(out *RestartService) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartService.
+func (in *RestartService) DeepCopy() *RestartService {
+ if in == nil {
+ return nil
+ }
+ out := new(RestartService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy.
+func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(RouteAdmissionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) {
+ *out = *in
+ if in.Collectors != nil {
+ in, out := &in.Collectors, &out.Collectors
+ *out = make([]IPPort, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig.
+func (in *SFlowConfig) DeepCopy() *SFlowConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SFlowConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Server) DeepCopyInto(out *Server) {
+ *out = *in
+ if in.Zones != nil {
+ in, out := &in.Zones, &out.Zones
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server.
+func (in *Server) DeepCopy() *Server {
+ if in == nil {
+ return nil
+ }
+ out := new(Server)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountIssuerStatus) DeepCopyInto(out *ServiceAccountIssuerStatus) {
+ *out = *in
+ if in.ExpirationTime != nil {
+ in, out := &in.ExpirationTime, &out.ExpirationTime
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountIssuerStatus.
+func (in *ServiceAccountIssuerStatus) DeepCopy() *ServiceAccountIssuerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceAccountIssuerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCA) DeepCopyInto(out *ServiceCA) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA.
+func (in *ServiceCA) DeepCopy() *ServiceCA {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCA)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCA) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceCA, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList.
+func (in *ServiceCAList) DeepCopy() *ServiceCAList {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCAList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCAList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec.
+func (in *ServiceCASpec) DeepCopy() *ServiceCASpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCASpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus.
+func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCAStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer.
+func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogAPIServer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceCatalogAPIServer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList.
+func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogAPIServerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec.
+func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogAPIServerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus.
+func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogAPIServerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager.
+func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogControllerManager)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceCatalogControllerManager, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList.
+func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogControllerManagerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec.
+func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogControllerManagerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus.
+func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCatalogControllerManagerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) {
+ *out = *in
+ if in.IPAMConfig != nil {
+ in, out := &in.IPAMConfig, &out.IPAMConfig
+ *out = new(IPAMConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig.
+func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SimpleMacvlanConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses.
+func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticIPAMAddresses)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) {
+ *out = *in
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]StaticIPAMAddresses, len(*in))
+ copy(*out, *in)
+ }
+ if in.Routes != nil {
+ in, out := &in.Routes, &out.Routes
+ *out = make([]StaticIPAMRoutes, len(*in))
+ copy(*out, *in)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = new(StaticIPAMDNS)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig.
+func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticIPAMConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) {
+ *out = *in
+ if in.Nameservers != nil {
+ in, out := &in.Nameservers, &out.Nameservers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Search != nil {
+ in, out := &in.Search, &out.Search
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS.
+func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticIPAMDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes.
+func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticIPAMRoutes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec.
+func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticPodOperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ if in.NodeStatuses != nil {
+ in, out := &in.NodeStatuses, &out.NodeStatuses
+ *out = make([]NodeStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus.
+func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticPodOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider.
+func (in *StatuspageProvider) DeepCopy() *StatuspageProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(StatuspageProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Storage) DeepCopyInto(out *Storage) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage.
+func (in *Storage) DeepCopy() *Storage {
+ if in == nil {
+ return nil
+ }
+ out := new(Storage)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Storage) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageList) DeepCopyInto(out *StorageList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Storage, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList.
+func (in *StorageList) DeepCopy() *StorageList {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec.
+func (in *StorageSpec) DeepCopy() *StorageSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageStatus) DeepCopyInto(out *StorageStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus.
+func (in *StorageStatus) DeepCopy() *StorageStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters.
+func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters {
+ if in == nil {
+ return nil
+ }
+ out := new(SyslogLoggingDestinationParameters)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Upstream) DeepCopyInto(out *Upstream) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Upstream.
+func (in *Upstream) DeepCopy() *Upstream {
+ if in == nil {
+ return nil
+ }
+ out := new(Upstream)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpstreamResolvers) DeepCopyInto(out *UpstreamResolvers) {
+ *out = *in
+ if in.Upstreams != nil {
+ in, out := &in.Upstreams, &out.Upstreams
+ *out = make([]Upstream, len(*in))
+ copy(*out, *in)
+ }
+ in.TransportConfig.DeepCopyInto(&out.TransportConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpstreamResolvers.
+func (in *UpstreamResolvers) DeepCopy() *UpstreamResolvers {
+ if in == nil {
+ return nil
+ }
+ out := new(UpstreamResolvers)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereCSIDriverConfigSpec) DeepCopyInto(out *VSphereCSIDriverConfigSpec) {
+ *out = *in
+ if in.TopologyCategories != nil {
+ in, out := &in.TopologyCategories, &out.TopologyCategories
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GlobalMaxSnapshotsPerBlockVolume != nil {
+ in, out := &in.GlobalMaxSnapshotsPerBlockVolume, &out.GlobalMaxSnapshotsPerBlockVolume
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.GranularMaxSnapshotsPerBlockVolumeInVSAN != nil {
+ in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVSAN, &out.GranularMaxSnapshotsPerBlockVolumeInVSAN
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.GranularMaxSnapshotsPerBlockVolumeInVVOL != nil {
+ in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVVOL, &out.GranularMaxSnapshotsPerBlockVolumeInVVOL
+ *out = new(uint32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereCSIDriverConfigSpec.
+func (in *VSphereCSIDriverConfigSpec) DeepCopy() *VSphereCSIDriverConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VSphereCSIDriverConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..a8c2213cff
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,427 @@
+authentications.operator.openshift.io:
+ Annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: authentications.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: authentication
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: Authentication
+ Labels: {}
+ PluralName: authentications
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+csisnapshotcontrollers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/562
+ CRDName: csisnapshotcontrollers.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: csi-snapshot-controller
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_80"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: CSISnapshotController
+ Labels: {}
+ PluralName: csisnapshotcontrollers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+cloudcredentials.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/692
+ CRDName: cloudcredentials.operator.openshift.io
+ Capability: CloudCredential
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: cloud-credential
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_40"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: CloudCredential
+ Labels: {}
+ PluralName: cloudcredentials
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+clustercsidrivers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/701
+ CRDName: clustercsidrivers.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - VSphereDriverConfiguration
+ FilenameOperatorName: csi-driver
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_90"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: ClusterCSIDriver
+ Labels: {}
+ PluralName: clustercsidrivers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+configs.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/612
+ CRDName: configs.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: Config
+ Labels: {}
+ PluralName: configs
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+consoles.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/486
+ CRDName: consoles.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: console
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: Console
+ Labels: {}
+ PluralName: consoles
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+dnses.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: dnses.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: dns
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_70"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: DNS
+ Labels: {}
+ PluralName: dnses
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+etcds.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/752
+ CRDName: etcds.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates:
+ - EtcdBackendQuota
+ - HardwareSpeed
+ FilenameOperatorName: etcd
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_12"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: Etcd
+ Labels: {}
+ PluralName: etcds
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+ingresscontrollers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/616
+ CRDName: ingresscontrollers.operator.openshift.io
+ Capability: Ingress
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ingress
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: IngressController
+ Labels: {}
+ PluralName: ingresscontrollers
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+insightsoperators.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1237
+ CRDName: insightsoperators.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: insights
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: InsightsOperator
+ Labels: {}
+ PluralName: insightsoperators
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+kubeapiservers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: kubeapiservers.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: kube-apiserver
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_20"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: KubeAPIServer
+ Labels: {}
+ PluralName: kubeapiservers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+kubecontrollermanagers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: kubecontrollermanagers.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: kube-controller-manager
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_25"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: KubeControllerManager
+ Labels: {}
+ PluralName: kubecontrollermanagers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+kubeschedulers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: kubeschedulers.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: kube-scheduler
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_25"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: KubeScheduler
+ Labels: {}
+ PluralName: kubeschedulers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+kubestorageversionmigrators.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/503
+ CRDName: kubestorageversionmigrators.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: kube-storage-version-migrator
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: "0000_40"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: KubeStorageVersionMigrator
+ Labels: {}
+ PluralName: kubestorageversionmigrators
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+machineconfigurations.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1453
+ CRDName: machineconfigurations.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - ManagedBootImages
+ - NodeDisruptionPolicy
+ FilenameOperatorName: machine-config
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_80"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: MachineConfiguration
+ Labels: {}
+ PluralName: machineconfigurations
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+networks.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: networks.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - NetworkLiveMigration
+ FilenameOperatorName: network
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_70"
+ GroupName: operator.openshift.io
+ HasStatus: false
+ KindName: Network
+ Labels: {}
+ PluralName: networks
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+openshiftapiservers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: openshiftapiservers.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: openshift-apiserver
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_30"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: OpenShiftAPIServer
+ Labels: {}
+ PluralName: openshiftapiservers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+openshiftcontrollermanagers.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: openshiftcontrollermanagers.operator.openshift.io
+ Capability: ""
+ Category: coreoperators
+ FeatureGates: []
+ FilenameOperatorName: openshift-controller-manager
+ FilenameOperatorOrdering: "02"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: OpenShiftControllerManager
+ Labels: {}
+ PluralName: openshiftcontrollermanagers
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+servicecas.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/475
+ CRDName: servicecas.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: service-ca
+ FilenameOperatorOrdering: "02"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: ServiceCA
+ Labels: {}
+ PluralName: servicecas
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
+storages.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/670
+ CRDName: storages.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: storage
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_50"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: Storage
+ Labels: {}
+ PluralName: storages
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..09718520eb
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,1975 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_GenerationStatus = map[string]string{
+ "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.",
+ "group": "group is the group of the thing you're tracking",
+ "resource": "resource is the resource type of the thing you're tracking",
+ "namespace": "namespace is where the thing you're tracking is",
+ "name": "name is the name of the thing you're tracking",
+ "lastGeneration": "lastGeneration is the last generation of the workload controller involved",
+ "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps",
+}
+
+func (GenerationStatus) SwaggerDoc() map[string]string {
+ return map_GenerationStatus
+}
+
+var map_MyOperatorResource = map[string]string{
+ "": "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (MyOperatorResource) SwaggerDoc() map[string]string {
+ return map_MyOperatorResource
+}
+
+var map_NodeStatus = map[string]string{
+ "": "NodeStatus provides information about the current state of a particular node managed by this operator.",
+ "nodeName": "nodeName is the name of the node",
+ "currentRevision": "currentRevision is the generation of the most recently successful deployment",
+ "targetRevision": "targetRevision is the generation of the deployment we're trying to apply",
+ "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.",
+ "lastFailedTime": "lastFailedTime is the time the last failed revision failed the last time.",
+ "lastFailedReason": "lastFailedReason is a machine readable failure reason string.",
+ "lastFailedCount": "lastFailedCount is how often the installer pod of the last failed revision failed.",
+ "lastFallbackCount": "lastFallbackCount is how often a fallback to a previous revision happened.",
+ "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of human readable errors during the failed deployment referenced in lastFailedRevision.",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+ return map_NodeStatus
+}
+
+var map_OperatorCondition = map[string]string{
+ "": "OperatorCondition is just the standard condition fields.",
+}
+
+func (OperatorCondition) SwaggerDoc() map[string]string {
+ return map_OperatorCondition
+}
+
+var map_OperatorSpec = map[string]string{
+ "": "OperatorSpec contains common fields operators need. It is intended to be anonymous included inside of the Spec struct for your particular operator.",
+ "managementState": "managementState indicates whether and how the operator should manage the component",
+ "logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
+ "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
+ "unsupportedConfigOverrides": "unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster.",
+ "observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator",
+}
+
+func (OperatorSpec) SwaggerDoc() map[string]string {
+ return map_OperatorSpec
+}
+
+var map_OperatorStatus = map[string]string{
+ "observedGeneration": "observedGeneration is the last generation change you've dealt with",
+ "conditions": "conditions is a list of conditions and their status",
+ "version": "version is the level this availability applies to",
+ "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state",
+ "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.",
+}
+
+func (OperatorStatus) SwaggerDoc() map[string]string {
+ return map_OperatorStatus
+}
+
+var map_StaticPodOperatorSpec = map[string]string{
+ "": "StaticPodOperatorSpec is spec for controllers that manage static pods.",
+ "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.",
+ "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)",
+ "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)",
+}
+
+func (StaticPodOperatorSpec) SwaggerDoc() map[string]string {
+ return map_StaticPodOperatorSpec
+}
+
+var map_StaticPodOperatorStatus = map[string]string{
+ "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.",
+ "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment",
+ "latestAvailableRevisionReason": "latestAvailableRevisionReason describe the detailed reason for the most recent deployment",
+ "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes",
+}
+
+func (StaticPodOperatorStatus) SwaggerDoc() map[string]string {
+ return map_StaticPodOperatorStatus
+}
+
+var map_Authentication = map[string]string{
+ "": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Authentication) SwaggerDoc() map[string]string {
+ return map_Authentication
+}
+
+var map_AuthenticationList = map[string]string{
+ "": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AuthenticationList) SwaggerDoc() map[string]string {
+ return map_AuthenticationList
+}
+
+var map_AuthenticationStatus = map[string]string{
+ "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver",
+}
+
+func (AuthenticationStatus) SwaggerDoc() map[string]string {
+ return map_AuthenticationStatus
+}
+
+var map_OAuthAPIServerStatus = map[string]string{
+ "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.",
+}
+
+func (OAuthAPIServerStatus) SwaggerDoc() map[string]string {
+ return map_OAuthAPIServerStatus
+}
+
+var map_CloudCredential = map[string]string{
+ "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (CloudCredential) SwaggerDoc() map[string]string {
+ return map_CloudCredential
+}
+
+var map_CloudCredentialList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (CloudCredentialList) SwaggerDoc() map[string]string {
+ return map_CloudCredentialList
+}
+
+var map_CloudCredentialSpec = map[string]string{
+ "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.",
+ "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"",
+}
+
+func (CloudCredentialSpec) SwaggerDoc() map[string]string {
+ return map_CloudCredentialSpec
+}
+
+var map_CloudCredentialStatus = map[string]string{
+ "": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.",
+}
+
+func (CloudCredentialStatus) SwaggerDoc() map[string]string {
+ return map_CloudCredentialStatus
+}
+
+var map_Config = map[string]string{
+ "": "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Config Operator.",
+ "status": "status defines the observed status of the Config Operator.",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+ return map_Config
+}
+
+var map_ConfigList = map[string]string{
+ "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+ return map_ConfigList
+}
+
+var map_AddPage = map[string]string{
+ "": "AddPage allows customizing actions on the Add page in developer perspective.",
+ "disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.",
+}
+
+func (AddPage) SwaggerDoc() map[string]string {
+ return map_AddPage
+}
+
+var map_Console = map[string]string{
+ "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Console) SwaggerDoc() map[string]string {
+ return map_Console
+}
+
+var map_ConsoleConfigRoute = map[string]string{
+ "": "ConsoleConfigRoute holds information on external route access to console. DEPRECATED",
+ "hostname": "hostname is the desired custom domain under which console will be available.",
+ "secret": "secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. Referenced Secret is required to contain following key value pairs: - \"tls.crt\" - to specifies custom certificate - \"tls.key\" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.",
+}
+
+func (ConsoleConfigRoute) SwaggerDoc() map[string]string {
+ return map_ConsoleConfigRoute
+}
+
+var map_ConsoleCustomization = map[string]string{
+ "": "ConsoleCustomization defines a list of optional configuration for the console UI.",
+ "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout.",
+ "documentationBaseURL": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.",
+ "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.",
+ "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred",
+ "developerCatalog": "developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs).",
+ "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.",
+ "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.",
+ "addPage": "addPage allows customizing actions on the Add page in developer perspective.",
+ "perspectives": "perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown.",
+}
+
+func (ConsoleCustomization) SwaggerDoc() map[string]string {
+ return map_ConsoleCustomization
+}
+
+var map_ConsoleList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConsoleList) SwaggerDoc() map[string]string {
+ return map_ConsoleList
+}
+
+var map_ConsoleProviders = map[string]string{
+ "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.",
+ "statuspage": "statuspage contains ID for statuspage.io page that provides status info about.",
+}
+
+func (ConsoleProviders) SwaggerDoc() map[string]string {
+ return map_ConsoleProviders
+}
+
+var map_ConsoleSpec = map[string]string{
+ "": "ConsoleSpec is the specification of the desired behavior of the Console.",
+ "customization": "customization is used to optionally provide a small set of customization options to the web console.",
+ "providers": "providers contains configuration for using specific service providers.",
+ "route": "route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED",
+ "plugins": "plugins defines a list of enabled console plugin names.",
+}
+
+func (ConsoleSpec) SwaggerDoc() map[string]string {
+ return map_ConsoleSpec
+}
+
+var map_ConsoleStatus = map[string]string{
+ "": "ConsoleStatus defines the observed status of the Console.",
+}
+
+func (ConsoleStatus) SwaggerDoc() map[string]string {
+ return map_ConsoleStatus
+}
+
+var map_DeveloperConsoleCatalogCategory = map[string]string{
+ "": "DeveloperConsoleCatalogCategory for the developer console catalog.",
+ "subcategories": "subcategories defines a list of child categories.",
+}
+
+func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string {
+ return map_DeveloperConsoleCatalogCategory
+}
+
+// The map_* variables below pair a console-customization API type with its
+// per-field swagger documentation; each type's SwaggerDoc method simply
+// returns its map. The `""` key holds the type-level description. This block
+// follows the zz_generated.swagger_doc pattern, so it is presumably generator
+// output derived from the API type comments — confirm before hand-editing.
+var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{
+ "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.",
+ "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.",
+ "label": "label defines a category display label. It is required and must have 1-64 characters.",
+ "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.",
+}
+
+func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string {
+ return map_DeveloperConsoleCatalogCategoryMeta
+}
+
+var map_DeveloperConsoleCatalogCustomization = map[string]string{
+ "": "DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.",
+ "categories": "categories which are shown in the developer catalog.",
+ "types": "types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. When omitted, all the sub-catalog types will be shown.",
+}
+
+func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string {
+ return map_DeveloperConsoleCatalogCustomization
+}
+
+var map_DeveloperConsoleCatalogTypes = map[string]string{
+ "": "DeveloperConsoleCatalogTypes defines the state of the sub-catalog types.",
+ "state": "state defines if a list of catalog types should be enabled or disabled.",
+ "enabled": "enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is non-empty, a new type will not be shown to the user until it is added to list. If the list is empty the complete developer catalog will be shown.",
+ "disabled": "disabled is a list of developer catalog types (sub-catalogs IDs) that are not shown to users. Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available in the console on the cluster configuration page, or when editing the YAML in the console. Example: \"Devfile\", \"HelmChart\", \"BuilderImage\" If the list is empty or all the available sub-catalog types are added, then the complete developer catalog should be hidden.",
+}
+
+func (DeveloperConsoleCatalogTypes) SwaggerDoc() map[string]string {
+ return map_DeveloperConsoleCatalogTypes
+}
+
+var map_Perspective = map[string]string{
+ "": "Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown",
+ "id": "id defines the id of the perspective. Example: \"dev\", \"admin\". The available perspective ids can be found in the code snippet section next to the yaml editor. Incorrect or unknown ids will be ignored.",
+ "visibility": "visibility defines the state of perspective along with access review checks if needed for that perspective.",
+ "pinnedResources": "pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. The list of available Kubernetes resources could be read via `kubectl api-resources`. The console will also provide a configuration UI and a YAML snippet that will list the available resources that can be pinned to the navigation. Incorrect or unknown resources will be ignored.",
+}
+
+func (Perspective) SwaggerDoc() map[string]string {
+ return map_Perspective
+}
+
+var map_PerspectiveVisibility = map[string]string{
+ "": "PerspectiveVisibility defines the criteria to show/hide a perspective",
+ "state": "state defines the perspective is enabled or disabled or access review check is required.",
+ "accessReview": "accessReview defines required and missing access review checks.",
+}
+
+func (PerspectiveVisibility) SwaggerDoc() map[string]string {
+ return map_PerspectiveVisibility
+}
+
+var map_PinnedResourceReference = map[string]string{
+ "": "PinnedResourceReference includes the group, version and type of resource",
+ "group": "group is the API Group of the Resource. Enter empty string for the core group. This value should consist of only lowercase alphanumeric characters, hyphens and periods. Example: \"\", \"apps\", \"build.openshift.io\", etc.",
+ "version": "version is the API Version of the Resource. This value should consist of only lowercase alphanumeric characters. Example: \"v1\", \"v1beta1\", etc.",
+ "resource": "resource is the type that is being referenced. It is normally the plural form of the resource kind in lowercase. This value should consist of only lowercase alphanumeric characters and hyphens. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.",
+}
+
+func (PinnedResourceReference) SwaggerDoc() map[string]string {
+ return map_PinnedResourceReference
+}
+
+var map_ProjectAccess = map[string]string{
+ "": "ProjectAccess contains options for project access roles",
+ "availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.",
+}
+
+func (ProjectAccess) SwaggerDoc() map[string]string {
+ return map_ProjectAccess
+}
+
+var map_QuickStarts = map[string]string{
+ "": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.",
+ "disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.",
+}
+
+func (QuickStarts) SwaggerDoc() map[string]string {
+ return map_QuickStarts
+}
+
+// map_ResourceAttributesAccessReview documents the access-review visibility
+// fields; exposed via SwaggerDoc below.
+// NOTE(review): fixed typo "atleast" -> "at least" in the type-level doc
+// string. This file appears to be generated from the API type comments, so
+// the same fix should also be made upstream or it will be reverted on
+// regeneration.
+var map_ResourceAttributesAccessReview = map[string]string{
+ "": "ResourceAttributesAccessReview defines the visibility of the perspective depending on the access review checks. `required` and `missing` can work together esp. in the case where the cluster admin wants to show another perspective to users without specific permissions. Out of `required` and `missing` at least one property should be non-empty.",
+ "required": "required defines a list of permission checks. The perspective will only be shown when all checks are successful. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the missing access review list.",
+ "missing": "missing defines a list of permission checks. The perspective will only be shown when at least one check fails. When omitted, the access review is skipped and the perspective will not be shown unless it is required to do so based on the configuration of the required access review list.",
+}
+
+func (ResourceAttributesAccessReview) SwaggerDoc() map[string]string {
+ return map_ResourceAttributesAccessReview
+}
+
+// Swagger doc maps for the status-page and cloud CSI driver config types;
+// each SwaggerDoc method returns the corresponding map. Same generated
+// pattern as the rest of the file — presumably do not edit by hand (confirm
+// against the file header, which is outside this view).
+var map_StatuspageProvider = map[string]string{
+ "": "StatuspageProvider provides identity for statuspage account.",
+ "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.",
+}
+
+func (StatuspageProvider) SwaggerDoc() map[string]string {
+ return map_StatuspageProvider
+}
+
+var map_AWSCSIDriverConfigSpec = map[string]string{
+ "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.",
+ "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.",
+}
+
+func (AWSCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_AWSCSIDriverConfigSpec
+}
+
+var map_AzureCSIDriverConfigSpec = map[string]string{
+ "": "AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.",
+ "diskEncryptionSet": "diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys.",
+}
+
+func (AzureCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_AzureCSIDriverConfigSpec
+}
+
+// map_AzureDiskEncryptionSet documents the Azure disk-encryption-set fields;
+// exposed via SwaggerDoc below.
+// NOTE(review): fixed typos in user-visible doc strings:
+// "SubscrionID" -> "SubscriptionID" and "alphanumberic" -> "alphanumeric"
+// (twice). This file appears to be generated from the API type comments, so
+// the same fixes should also be made upstream or they will be reverted on
+// regeneration.
+var map_AzureDiskEncryptionSet = map[string]string{
+ "": "AzureDiskEncryptionSet defines the configuration for a disk encryption set.",
+ "subscriptionID": "subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378",
+ "resourceGroup": "resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length.",
+ "name": "name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length.",
+}
+
+func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string {
+ return map_AzureDiskEncryptionSet
+}
+
+// Swagger doc maps for the CSI driver, CSI snapshot controller, and DNS
+// operator API types. As above, each map keys field name -> documentation
+// string (the "" key is the type-level description) and each SwaggerDoc
+// method returns its map verbatim. Generated-file pattern — presumably
+// regenerated from API type comments; confirm before hand-editing.
+var map_CSIDriverConfigSpec = map[string]string{
+ "": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.",
+ "driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. Consumers should treat unknown values as a NO-OP.",
+ "aws": "aws is used to configure the AWS CSI driver.",
+ "azure": "azure is used to configure the Azure CSI driver.",
+ "gcp": "gcp is used to configure the GCP CSI driver.",
+ "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.",
+ "vSphere": "vsphere is used to configure the vsphere CSI driver.",
+}
+
+func (CSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_CSIDriverConfigSpec
+}
+
+var map_ClusterCSIDriver = map[string]string{
+ "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. See CSIDriverName type for list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (ClusterCSIDriver) SwaggerDoc() map[string]string {
+ return map_ClusterCSIDriver
+}
+
+var map_ClusterCSIDriverList = map[string]string{
+ "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ClusterCSIDriverList) SwaggerDoc() map[string]string {
+ return map_ClusterCSIDriverList
+}
+
+var map_ClusterCSIDriverSpec = map[string]string{
+ "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator",
+ "storageClassState": "StorageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.",
+ "driverConfig": "driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.",
+}
+
+func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string {
+ return map_ClusterCSIDriverSpec
+}
+
+var map_ClusterCSIDriverStatus = map[string]string{
+ "": "ClusterCSIDriverStatus is the observed status of CSI driver operator",
+}
+
+func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string {
+ return map_ClusterCSIDriverStatus
+}
+
+var map_GCPCSIDriverConfigSpec = map[string]string{
+ "": "GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.",
+ "kmsKey": "kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP.",
+}
+
+func (GCPCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_GCPCSIDriverConfigSpec
+}
+
+var map_GCPKMSKeyReference = map[string]string{
+ "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key",
+ "name": "name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.",
+ "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.",
+ "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited.",
+ "location": "location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or \"global\". Defaults to global, if not set.",
+}
+
+func (GCPKMSKeyReference) SwaggerDoc() map[string]string {
+ return map_GCPKMSKeyReference
+}
+
+var map_IBMCloudCSIDriverConfigSpec = map[string]string{
+ "": "IBMCloudCSIDriverConfigSpec defines the properties that can be configured for the IBM Cloud CSI driver.",
+ "encryptionKeyCRN": "encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use for disk encryption of volumes for the default storage classes.",
+}
+
+func (IBMCloudCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_IBMCloudCSIDriverConfigSpec
+}
+
+var map_VSphereCSIDriverConfigSpec = map[string]string{
+ "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.",
+ "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.",
+ "globalMaxSnapshotsPerBlockVolume": "globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of datastores. If omitted, the platform chooses a default, which is subject to change over time, currently that default is 3. Snapshots can not be disabled using this parameter. Increasing number of snapshots above 3 can have negative impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html",
+ "granularMaxSnapshotsPerBlockVolumeInVSAN": "granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VSAN can not be disabled using this parameter.",
+ "granularMaxSnapshotsPerBlockVolumeInVVOL": "granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VVOL can not be disabled using this parameter.",
+}
+
+func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+ return map_VSphereCSIDriverConfigSpec
+}
+
+var map_CSISnapshotController = map[string]string{
+ "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (CSISnapshotController) SwaggerDoc() map[string]string {
+ return map_CSISnapshotController
+}
+
+var map_CSISnapshotControllerList = map[string]string{
+ "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (CSISnapshotControllerList) SwaggerDoc() map[string]string {
+ return map_CSISnapshotControllerList
+}
+
+var map_CSISnapshotControllerSpec = map[string]string{
+ "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.",
+}
+
+func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string {
+ return map_CSISnapshotControllerSpec
+}
+
+var map_CSISnapshotControllerStatus = map[string]string{
+ "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.",
+}
+
+func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string {
+ return map_CSISnapshotControllerStatus
+}
+
+var map_DNS = map[string]string{
+ "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the DNS.",
+ "status": "status is the most recently observed status of the DNS.",
+}
+
+func (DNS) SwaggerDoc() map[string]string {
+ return map_DNS
+}
+
+var map_DNSCache = map[string]string{
+ "": "DNSCache defines the fields for configuring DNS caching.",
+ "positiveTTL": "positiveTTL is optional and specifies the amount of time that a positive response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 900 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 900 seconds is subject to change.",
+ "negativeTTL": "negativeTTL is optional and specifies the amount of time that a negative response should be cached.\n\nIf configured, it must be a value of 1s (1 second) or greater up to a theoretical maximum of several years. This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"100s\", \"1m30s\", \"12h30m10s\". Values that are fractions of a second are rounded down to the nearest second. If the configured value is less than 1s, the default value will be used. If not configured, the value will be 0s and OpenShift will use a default value of 30 seconds unless noted otherwise in the respective Corefile for your version of OpenShift. The default value of 30 seconds is subject to change.",
+}
+
+func (DNSCache) SwaggerDoc() map[string]string {
+ return map_DNSCache
+}
+
+var map_DNSList = map[string]string{
+ "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (DNSList) SwaggerDoc() map[string]string {
+ return map_DNSList
+}
+
+var map_DNSNodePlacement = map[string]string{
+ "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.",
+ "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.",
+ "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nIf empty, the DNS operator sets a toleration for the \"node-role.kubernetes.io/master\" taint. This default is subject to change. Specifying tolerations without including a toleration for the \"node-role.kubernetes.io/master\" taint may be risky as it could lead to an outage if all worker nodes become unavailable.\n\nNote that the daemon controller adds some tolerations as well. See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/",
+}
+
+func (DNSNodePlacement) SwaggerDoc() map[string]string {
+ return map_DNSNodePlacement
+}
+
+var map_DNSOverTLSConfig = map[string]string{
+ "": "DNSOverTLSConfig describes optional DNSTransportConfig fields that should be captured.",
+ "serverName": "serverName is the upstream server to connect to when forwarding DNS queries. This is required when Transport is set to \"TLS\". ServerName will be validated against the DNS naming conventions in RFC 1123 and should match the TLS certificate installed in the upstream resolver(s).",
+ "caBundle": "caBundle references a ConfigMap that must contain either a single CA Certificate or a CA Bundle. This allows cluster administrators to provide their own CA or CA bundle for validating the certificate of upstream resolvers.\n\n1. The configmap must contain a `ca-bundle.crt` key. 2. The value must be a PEM encoded CA certificate or CA bundle. 3. The administrator must create this configmap in the openshift-config namespace. 4. The upstream server certificate must contain a Subject Alternative Name (SAN) that matches ServerName.",
+}
+
+func (DNSOverTLSConfig) SwaggerDoc() map[string]string {
+ return map_DNSOverTLSConfig
+}
+
+var map_DNSSpec = map[string]string{
+ "": "DNSSpec is the specification of the desired behavior of the DNS.",
+ "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.",
+ "upstreamResolvers": "upstreamResolvers defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers for the case of the default (\".\") server\n\nIf this field is not specified, the upstream used will default to /etc/resolv.conf, with policy \"sequential\"",
+ "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.",
+ "managementState": "managementState indicates whether the DNS operator should manage cluster DNS",
+ "operatorLogLevel": "operatorLogLevel controls the logging level of the DNS Operator. Valid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\". setting operatorLogLevel: Trace will produce extremely verbose logs.",
+ "logLevel": "logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses.\n Setting logLevel: Trace will produce extremely verbose logs.\nValid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\".",
+ "cache": "cache describes the caching configuration that applies to all server blocks listed in the Corefile. This field allows a cluster admin to optionally configure: * positiveTTL which is a duration for which positive responses should be cached. * negativeTTL which is a duration for which negative responses should be cached. If this is not configured, OpenShift will configure positive and negative caching with a default value that is subject to change. At the time of writing, the default positiveTTL is 900 seconds and the default negativeTTL is 30 seconds or as noted in the respective Corefile for your version of OpenShift.",
+}
+
+func (DNSSpec) SwaggerDoc() map[string]string {
+ return map_DNSSpec
+}
+
+var map_DNSStatus = map[string]string{
+ "": "DNSStatus defines the observed status of the DNS.",
+ "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service",
+ "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.",
+}
+
+func (DNSStatus) SwaggerDoc() map[string]string {
+ return map_DNSStatus
+}
+
+var map_DNSTransportConfig = map[string]string{
+ "": "DNSTransportConfig groups related configuration parameters used for configuring forwarding to upstream resolvers that support DNS-over-TLS.",
+ "transport": "transport allows cluster administrators to opt-in to using a DNS-over-TLS connection between cluster DNS and an upstream resolver(s). Configuring TLS as the transport at this level without configuring a CABundle will result in the system certificates being used to verify the serving certificate of the upstream resolver(s).\n\nPossible values: \"\" (empty) - This means no explicit choice has been made and the platform chooses the default which is subject to change over time. The current default is \"Cleartext\". \"Cleartext\" - Cluster admin specified cleartext option. This results in the same functionality as an empty value but may be useful when a cluster admin wants to be more explicit about the transport, or wants to switch from \"TLS\" to \"Cleartext\" explicitly. \"TLS\" - This indicates that DNS queries should be sent over a TLS connection. If Transport is set to TLS, you MUST also set ServerName. If a port is not included with the upstream IP, port 853 will be tried by default per RFC 7858 section 3.1; https://datatracker.ietf.org/doc/html/rfc7858#section-3.1.",
+ "tls": "tls contains the additional configuration options to use when Transport is set to \"TLS\".",
+}
+
+func (DNSTransportConfig) SwaggerDoc() map[string]string {
+ return map_DNSTransportConfig
+}
+
+var map_ForwardPlugin = map[string]string{
+ "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.",
+ "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.",
+ "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"",
+ "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.",
+ "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.",
+}
+
+func (ForwardPlugin) SwaggerDoc() map[string]string {
+ return map_ForwardPlugin
+}
+
+var map_Server = map[string]string{
+ "": "Server defines the schema for a server that runs per instance of CoreDNS.",
+ "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.",
+ "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.",
+ "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.",
+}
+
+func (Server) SwaggerDoc() map[string]string {
+ return map_Server
+}
+
+// map_Upstream documents the Upstream resolver type; exposed via SwaggerDoc
+// below.
+// NOTE(review): completed the truncated sentence "Port must be between
+// 65535" -> "Port must be between 1 and 65535" in the port doc string.
+// This file appears to be generated from the API type comments, so the same
+// fix should also be made upstream (and the stated 1-65535 range confirmed
+// against the field's validation markers) or it will be reverted on
+// regeneration.
+var map_Upstream = map[string]string{
+ "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.",
+ "type": "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address",
+ "address": "Address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.",
+ "port": "Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 1 and 65535",
+}
+
+func (Upstream) SwaggerDoc() map[string]string {
+ return map_Upstream
+}
+
+var map_UpstreamResolvers = map[string]string{
+ "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential",
+ "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default",
+ "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"",
+ "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.",
+ "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.",
+}
+
+func (UpstreamResolvers) SwaggerDoc() map[string]string {
+ return map_UpstreamResolvers
+}
+
+var map_Etcd = map[string]string{
+ "": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Etcd) SwaggerDoc() map[string]string {
+ return map_Etcd
+}
+
+var map_EtcdList = map[string]string{
+ "": "KubeAPISOperatorConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (EtcdList) SwaggerDoc() map[string]string {
+ return map_EtcdList
+}
+
+var map_EtcdSpec = map[string]string{
+ "controlPlaneHardwareSpeed": "HardwareSpeed allows user to change the etcd tuning profile which configures the latency parameters for heartbeat interval and leader election timeouts allowing the cluster to tolerate longer round-trip-times between etcd members. Valid values are \"\", \"Standard\" and \"Slower\".\n\t\"\" means no opinion and the platform is left to choose a reasonable default\n\twhich is subject to change without notice.",
+ "backendQuotaGiB": "backendQuotaGiB sets the etcd backend storage size limit in gibibytes. The value should be an integer not less than 8 and not more than 32. When not specified, the default value is 8.",
+}
+
+func (EtcdSpec) SwaggerDoc() map[string]string {
+ return map_EtcdSpec
+}
+
+var map_AWSClassicLoadBalancerParameters = map[string]string{
+ "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.",
+ "connectionIdleTimeout": "connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change.",
+}
+
+func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_AWSClassicLoadBalancerParameters
+}
+
+var map_AWSLoadBalancerParameters = map[string]string{
+ "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.",
+ "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb",
+ "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.",
+ "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB.",
+}
+
+func (AWSLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_AWSLoadBalancerParameters
+}
+
+var map_AWSNetworkLoadBalancerParameters = map[string]string{
+ "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer.",
+}
+
+func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_AWSNetworkLoadBalancerParameters
+}
+
+var map_AccessLogging = map[string]string{
+ "": "AccessLogging describes how client requests should be logged.",
+ "destination": "destination is where access logs go.",
+ "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.",
+ "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.",
+ "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.",
+ "logEmptyRequests": "logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are \"Log\" and \"Ignore\". The default value is \"Log\".",
+}
+
+func (AccessLogging) SwaggerDoc() map[string]string {
+ return map_AccessLogging
+}
+
+var map_ClientTLS = map[string]string{
+ "": "ClientTLS specifies TLS configuration to enable client-to-server authentication, which can be used for mutual TLS.",
+ "clientCertificatePolicy": "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\".\n\nNote that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes.",
+ "clientCA": "clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace.",
+ "allowedSubjectPatterns": "allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection.",
+}
+
+func (ClientTLS) SwaggerDoc() map[string]string {
+ return map_ClientTLS
+}
+
+var map_ContainerLoggingDestinationParameters = map[string]string{
+ "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.",
+ "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 8192, inclusive.\n\nWhen omitted, the default value is 1024.",
+}
+
+func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string {
+ return map_ContainerLoggingDestinationParameters
+}
+
+var map_EndpointPublishingStrategy = map[string]string{
+ "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.",
+ "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved.",
+ "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.",
+ "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.",
+ "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.",
+ "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.",
+}
+
+func (EndpointPublishingStrategy) SwaggerDoc() map[string]string {
+ return map_EndpointPublishingStrategy
+}
+
+var map_GCPLoadBalancerParameters = map[string]string{
+ "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.",
+ "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer. Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access",
+}
+
+func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_GCPLoadBalancerParameters
+}
+
+var map_HTTPCompressionPolicy = map[string]string{
+ "": "httpCompressionPolicy turns on compression for the specified MIME types.\n\nThis field is optional, and its absence implies that compression should not be enabled globally in HAProxy.\n\nIf httpCompressionPolicy exists, compression should be enabled only for the specified MIME types.",
+ "mimeTypes": "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression.\n\nNote: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2",
+}
+
+func (HTTPCompressionPolicy) SwaggerDoc() map[string]string {
+ return map_HTTPCompressionPolicy
+}
+
+var map_HostNetworkStrategy = map[string]string{
+ "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.",
+ "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.",
+ "httpPort": "httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80.",
+ "httpsPort": "httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443.",
+ "statsPort": "statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936.",
+}
+
+func (HostNetworkStrategy) SwaggerDoc() map[string]string {
+ return map_HostNetworkStrategy
+}
+
+var map_IBMLoadBalancerParameters = map[string]string{
+ "": "IBMLoadBalancerParameters provides configuration settings that are specific to IBM Cloud load balancers.",
+ "protocol": "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\"\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nValid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled.",
+}
+
+func (IBMLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_IBMLoadBalancerParameters
+}
+
+var map_IngressController = map[string]string{
+ "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the IngressController.",
+ "status": "status is the most recently observed status of the IngressController.",
+}
+
+func (IngressController) SwaggerDoc() map[string]string {
+ return map_IngressController
+}
+
+var map_IngressControllerCaptureHTTPCookie = map[string]string{
+ "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.",
+ "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.",
+}
+
+func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string {
+ return map_IngressControllerCaptureHTTPCookie
+}
+
+var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{
+ "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.",
+ "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.",
+ "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.",
+ "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.",
+}
+
+func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string {
+ return map_IngressControllerCaptureHTTPCookieUnion
+}
+
+var map_IngressControllerCaptureHTTPHeader = map[string]string{
+ "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.",
+ "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.",
+ "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.",
+}
+
+func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string {
+ return map_IngressControllerCaptureHTTPHeader
+}
+
+var map_IngressControllerCaptureHTTPHeaders = map[string]string{
+ "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.",
+ "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.",
+ "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.",
+}
+
+func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string {
+ return map_IngressControllerCaptureHTTPHeaders
+}
+
+var map_IngressControllerHTTPHeader = map[string]string{
+ "": "IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header.",
+ "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.",
+ "action": "action specifies actions to perform on headers, such as setting or deleting headers.",
+}
+
+func (IngressControllerHTTPHeader) SwaggerDoc() map[string]string {
+ return map_IngressControllerHTTPHeader
+}
+
+var map_IngressControllerHTTPHeaderActionUnion = map[string]string{
+ "": "IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header.",
+ "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.",
+ "set": "set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise.",
+}
+
+func (IngressControllerHTTPHeaderActionUnion) SwaggerDoc() map[string]string {
+ return map_IngressControllerHTTPHeaderActionUnion
+}
+
+var map_IngressControllerHTTPHeaderActions = map[string]string{
+ "": "IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.",
+ "response": "response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\".",
+ "request": "request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". ",
+}
+
+func (IngressControllerHTTPHeaderActions) SwaggerDoc() map[string]string {
+ return map_IngressControllerHTTPHeaderActions
+}
+
+var map_IngressControllerHTTPHeaders = map[string]string{
+ "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.",
+ "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".",
+ "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.",
+ "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.",
+ "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.",
+}
+
+func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string {
+ return map_IngressControllerHTTPHeaders
+}
+
+var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{
+ "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.",
+ "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected.",
+ "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3",
+}
+
+func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string {
+ return map_IngressControllerHTTPUniqueIdHeaderPolicy
+}
+
+var map_IngressControllerList = map[string]string{
+ "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (IngressControllerList) SwaggerDoc() map[string]string {
+ return map_IngressControllerList
+}
+
+var map_IngressControllerLogging = map[string]string{
+ "": "IngressControllerLogging describes what should be logged where.",
+ "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.",
+}
+
+func (IngressControllerLogging) SwaggerDoc() map[string]string {
+ return map_IngressControllerLogging
+}
+
+var map_IngressControllerSetHTTPHeader = map[string]string{
+ "": "IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.",
+ "value": "value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController.",
+}
+
+func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string {
+ return map_IngressControllerSetHTTPHeader
+}
+
+var map_IngressControllerSpec = map[string]string{
+ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.",
+ "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.",
+ "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.",
+ "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.",
+ "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.",
+ "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.",
+ "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.",
+ "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.",
+ "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.",
+ "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.",
+ "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.",
+ "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.",
+ "logging": "logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled.",
+ "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.",
+ "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.",
+ "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.",
+ "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.",
+ "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.",
+}
+
+func (IngressControllerSpec) SwaggerDoc() map[string]string {
+ return map_IngressControllerSpec
+}
+
+var map_IngressControllerStatus = map[string]string{
+ "": "IngressControllerStatus defines the observed status of the IngressController.",
+ "availableReplicas": "availableReplicas is number of observed available replicas according to the ingress controller deployment.",
+ "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.",
+ "domain": "domain is the actual domain in use.",
+ "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.",
+ "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.",
+ "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.",
+ "observedGeneration": "observedGeneration is the most recent generation observed.",
+ "namespaceSelector": "namespaceSelector is the actual namespaceSelector in use.",
+ "routeSelector": "routeSelector is the actual routeSelector in use.",
+}
+
+func (IngressControllerStatus) SwaggerDoc() map[string]string {
+ return map_IngressControllerStatus
+}
+
+var map_IngressControllerTuningOptions = map[string]string{
+ "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods",
+ "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.",
+ "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.",
+ "threadCount": "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.",
+ "clientTimeout": "clientTimeout defines how long a connection will be held open while waiting for a client response.\n\nIf unset, the default timeout is 30s",
+ "clientFinTimeout": "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection.\n\nIf unset, the default timeout is 1s",
+ "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s",
+ "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s",
+ "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h",
+ "connectTimeout": "ConnectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.",
+ "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s",
+ "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.",
+ "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.",
+ "reloadInterval": "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly.\n\nThe value must be a time duration value; see . Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default).\n\nA zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nNote: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload.",
+}
+
+func (IngressControllerTuningOptions) SwaggerDoc() map[string]string {
+ return map_IngressControllerTuningOptions
+}
+
+var map_LoadBalancerStrategy = map[string]string{
+ "": "LoadBalancerStrategy holds parameters for a load balancer.",
+ "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".",
+ "allowedSourceRanges": "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses.\n\nTo facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12.",
+ "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.",
+ "dnsManagementPolicy": "dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.",
+}
+
+func (LoadBalancerStrategy) SwaggerDoc() map[string]string {
+ return map_LoadBalancerStrategy
+}
+
+var map_LoggingDestination = map[string]string{
+ "": "LoggingDestination describes a destination for log messages.",
+ "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.",
+ "syslog": "syslog holds parameters for a syslog endpoint. Present only if type is Syslog.",
+ "container": "container holds parameters for the Container logging destination. Present only if type is Container.",
+}
+
+func (LoggingDestination) SwaggerDoc() map[string]string {
+ return map_LoggingDestination
+}
+
+var map_NodePlacement = map[string]string{
+ "": "NodePlacement describes node scheduling configuration for an ingress controller.",
+ "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf set, the specified selector is used and replaces the default.\n\nIf unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nWhen defaultPlacement is Workers, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nWhen defaultPlacement is ControlPlane, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/master: ''\n\nThese defaults are subject to change.\n\nNote that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors.",
+ "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/",
+}
+
+func (NodePlacement) SwaggerDoc() map[string]string {
+ return map_NodePlacement
+}
+
+var map_NodePortStrategy = map[string]string{
+ "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.",
+ "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.",
+}
+
+func (NodePortStrategy) SwaggerDoc() map[string]string {
+ return map_NodePortStrategy
+}
+
+var map_PrivateStrategy = map[string]string{
+ "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.",
+ "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.",
+}
+
+func (PrivateStrategy) SwaggerDoc() map[string]string {
+ return map_PrivateStrategy
+}
+
+var map_ProviderLoadBalancerParameters = map[string]string{
+ "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.",
+ "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"IBM\", \"Nutanix\", \"OpenStack\", and \"VSphere\".",
+ "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.",
+ "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. See specific gcp fields for details about their defaults.",
+ "ibm": "ibm provides configuration settings that are specific to IBM Cloud load balancers.\n\nIf empty, defaults will be applied. See specific ibm fields for details about their defaults.",
+}
+
+func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string {
+ return map_ProviderLoadBalancerParameters
+}
+
+var map_RouteAdmissionPolicy = map[string]string{
+ "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.",
+ "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.",
+ "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".",
+}
+
+func (RouteAdmissionPolicy) SwaggerDoc() map[string]string {
+ return map_RouteAdmissionPolicy
+}
+
+var map_SyslogLoggingDestinationParameters = map[string]string{
+ "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.",
+ "address": "address is the IP address of the syslog endpoint that receives log messages.",
+ "port": "port is the UDP port number of the syslog endpoint that receives log messages.",
+ "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".",
+ "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 4096, inclusive.\n\nWhen omitted, the default value is 1024.",
+}
+
+func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string {
+ return map_SyslogLoggingDestinationParameters
+}
+
+var map_GatherStatus = map[string]string{
+ "": "gatherStatus provides information about the last known gather event.",
+ "lastGatherTime": "lastGatherTime is the last time when Insights data gathering finished. An empty value means that no data has been gathered yet.",
+ "lastGatherDuration": "lastGatherDuration is the total time taken to process all gatherers during the last gather event.",
+ "gatherers": "gatherers is a list of active gatherers (and their statuses) in the last gathering.",
+}
+
+func (GatherStatus) SwaggerDoc() map[string]string {
+ return map_GatherStatus
+}
+
+var map_GathererStatus = map[string]string{
+ "": "gathererStatus represents information about a particular data gatherer.",
+ "conditions": "conditions provide details on the status of each gatherer.",
+ "name": "name is the name of the gatherer.",
+ "lastGatherDuration": "lastGatherDuration represents the time spent gathering.",
+}
+
+func (GathererStatus) SwaggerDoc() map[string]string {
+ return map_GathererStatus
+}
+
+var map_HealthCheck = map[string]string{
+ "": "healthCheck represents an Insights health check attributes.",
+ "description": "description provides basic description of the healthcheck.",
+ "totalRisk": "totalRisk of the healthcheck. Indicator of the total risk posed by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, and the higher the number, the more important the issue.",
+ "advisorURI": "advisorURI provides the URL link to the Insights Advisor.",
+ "state": "state determines what the current state of the health check is. Health check is enabled by default and can be disabled by the user in the Insights advisor user interface.",
+}
+
+func (HealthCheck) SwaggerDoc() map[string]string {
+ return map_HealthCheck
+}
+
+// Generated swagger doc maps for the Insights operator and kube-apiserver
+// operator API types (type summary under the "" key, one entry per JSON field).
+var map_InsightsOperator = map[string]string{
+ "": "\n\nInsightsOperator holds cluster-wide information about the Insights Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Insights.",
+ "status": "status is the most recently observed status of the Insights operator.",
+}
+
+func (InsightsOperator) SwaggerDoc() map[string]string {
+ return map_InsightsOperator
+}
+
+var map_InsightsOperatorList = map[string]string{
+ "": "InsightsOperatorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (InsightsOperatorList) SwaggerDoc() map[string]string {
+ return map_InsightsOperatorList
+}
+
+var map_InsightsOperatorStatus = map[string]string{
+ "gatherStatus": "gatherStatus provides basic information about the last Insights data gathering. When omitted, this means no data gathering has taken place yet.",
+ "insightsReport": "insightsReport provides general Insights analysis results. When omitted, this means no data gathering has taken place yet.",
+}
+
+func (InsightsOperatorStatus) SwaggerDoc() map[string]string {
+ return map_InsightsOperatorStatus
+}
+
+var map_InsightsReport = map[string]string{
+ "": "insightsReport provides Insights health check report based on the most recently sent Insights data.",
+ "downloadedAt": "downloadedAt is the time when the last Insights report was downloaded. An empty value means that there has not been any Insights report downloaded yet and it usually appears in disconnected clusters (or clusters when the Insights data gathering is disabled).",
+ "healthChecks": "healthChecks provides basic information about active Insights health checks in a cluster.",
+}
+
+func (InsightsReport) SwaggerDoc() map[string]string {
+ return map_InsightsReport
+}
+
+var map_KubeAPIServer = map[string]string{
+ "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Kubernetes API Server",
+ "status": "status is the most recently observed status of the Kubernetes API Server",
+}
+
+func (KubeAPIServer) SwaggerDoc() map[string]string {
+ return map_KubeAPIServer
+}
+
+var map_KubeAPIServerList = map[string]string{
+ "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (KubeAPIServerList) SwaggerDoc() map[string]string {
+ return map_KubeAPIServerList
+}
+
+// NOTE(review): grammar fix ("The other items represent"); mirror upstream in
+// the KubeAPIServerStatus type comment before the next regeneration.
+var map_KubeAPIServerStatus = map[string]string{
+ "serviceAccountIssuers": "serviceAccountIssuers tracks history of used service account issuers. The item without expiration time represents the currently used service account issuer. The other items represent service account issuers that were used previously and are still being trusted. The default expiration for the items is set by the platform and it defaults to 24h. see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection",
+}
+
+func (KubeAPIServerStatus) SwaggerDoc() map[string]string {
+ return map_KubeAPIServerStatus
+}
+
+// Generated swagger doc maps for kube-controller-manager,
+// kube-storage-version-migrator and Machine Configuration operator API types.
+var map_ServiceAccountIssuerStatus = map[string]string{
+ "name": "name is the name of the service account issuer",
+ "expirationTime": "expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list of service account issuers.",
+}
+
+func (ServiceAccountIssuerStatus) SwaggerDoc() map[string]string {
+ return map_ServiceAccountIssuerStatus
+}
+
+var map_KubeControllerManager = map[string]string{
+ "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager",
+ "status": "status is the most recently observed status of the Kubernetes Controller Manager",
+}
+
+func (KubeControllerManager) SwaggerDoc() map[string]string {
+ return map_KubeControllerManager
+}
+
+var map_KubeControllerManagerList = map[string]string{
+ "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (KubeControllerManagerList) SwaggerDoc() map[string]string {
+ return map_KubeControllerManagerList
+}
+
+var map_KubeControllerManagerSpec = map[string]string{
+ "useMoreSecureServiceCA": "useMoreSecureServiceCA indicates that the service-ca.crt provided in SA token volumes should include only enough certificates to validate service serving certificates. Once set to true, it cannot be set to false. Even if someone finds a way to set it back to false, the service-ca.crt files that previously existed will only have the more secure content.",
+}
+
+func (KubeControllerManagerSpec) SwaggerDoc() map[string]string {
+ return map_KubeControllerManagerSpec
+}
+
+var map_KubeStorageVersionMigrator = map[string]string{
+ "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string {
+ return map_KubeStorageVersionMigrator
+}
+
+var map_KubeStorageVersionMigratorList = map[string]string{
+ "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string {
+ return map_KubeStorageVersionMigratorList
+}
+
+var map_MachineConfiguration = map[string]string{
+ "": "MachineConfiguration provides information to configure an operator to manage Machine Configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Machine Config Operator",
+ "status": "status is the most recently observed status of the Machine Config Operator",
+}
+
+func (MachineConfiguration) SwaggerDoc() map[string]string {
+ return map_MachineConfiguration
+}
+
+var map_MachineConfigurationList = map[string]string{
+ "": "MachineConfigurationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (MachineConfigurationList) SwaggerDoc() map[string]string {
+ return map_MachineConfigurationList
+}
+
+var map_MachineConfigurationSpec = map[string]string{
+ "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, no boot images will be updated.",
+ "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.",
+}
+
+func (MachineConfigurationSpec) SwaggerDoc() map[string]string {
+ return map_MachineConfigurationSpec
+}
+
+var map_MachineConfigurationStatus = map[string]string{
+ "observedGeneration": "observedGeneration is the last generation change you've dealt with",
+ "conditions": "conditions is a list of conditions and their status",
+ "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.",
+}
+
+func (MachineConfigurationStatus) SwaggerDoc() map[string]string {
+ return map_MachineConfigurationStatus
+}
+
+var map_MachineManager = map[string]string{
+ "": "MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information such as the resource type and the API Group of the resource. It also provides granular control via the selection field.",
+ "resource": "resource is the machine management resource's type. The only current valid value is machinesets. machinesets means that the machine manager will only register resources of the kind MachineSet.",
+ "apiGroup": "apiGroup is name of the APIGroup that the machine management resource belongs to. The only current valid value is machine.openshift.io. machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group.",
+ "selection": "selection allows granular control of the machine management resources that will be registered for boot image updates.",
+}
+
+func (MachineManager) SwaggerDoc() map[string]string {
+ return map_MachineManager
+}
+
+var map_MachineManagerSelector = map[string]string{
+ "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated.",
+ "partial": "partial provides label selector(s) that can be used to match machine management resources. Only permitted when mode is set to \"Partial\".",
+}
+
+func (MachineManagerSelector) SwaggerDoc() map[string]string {
+ return map_MachineManagerSelector
+}
+
+var map_ManagedBootImages = map[string]string{
+ "machineManagers": "machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator will watch for changes to this list. Only one entry is permitted per type of machine management resource.",
+}
+
+func (ManagedBootImages) SwaggerDoc() map[string]string {
+ return map_ManagedBootImages
+}
+
+// NOTE(review): fixed "a list MachineConfig" -> "a list of MachineConfig" and
+// "definition an action" -> "defining an action"; mirror these fixes in the
+// upstream type comments so regeneration does not revert them.
+var map_NodeDisruptionPolicyClusterStatus = map[string]string{
+ "": "NodeDisruptionPolicyClusterStatus is the type for the status object, rendered by the controller as a merge of cluster defaults and user provided policies",
+ "files": "files is a list of MachineConfig file definitions and actions to take to changes on those paths",
+ "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services",
+ "sshkey": "sshkey is the overall sshkey MachineConfig definition",
+}
+
+func (NodeDisruptionPolicyClusterStatus) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyClusterStatus
+}
+
+var map_NodeDisruptionPolicyConfig = map[string]string{
+ "": "NodeDisruptionPolicyConfig is the overall spec definition for files/units/sshkeys",
+ "files": "files is a list of MachineConfig file definitions and actions to take to changes on those paths This list supports a maximum of 50 entries.",
+ "units": "units is a list of MachineConfig unit definitions and actions to take on changes to those services This list supports a maximum of 50 entries.",
+ "sshkey": "sshkey maps to the ignition.sshkeys field in the MachineConfig object, defining an action for this will apply to all sshkey changes in the cluster",
+}
+
+func (NodeDisruptionPolicyConfig) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyConfig
+}
+
+// NOTE(review): fixed typos "supercede" -> "supersede", "atleast" -> "at least"
+// and "can up to" -> "can be up to" in the generated descriptions; mirror the
+// fixes in the upstream NodeDisruptionPolicySpec* type comments.
+var map_NodeDisruptionPolicySpecAction = map[string]string{
+ "type": "type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
+ "reload": "reload specifies the service to reload, only valid if type is reload",
+ "restart": "restart specifies the service to restart, only valid if type is restart",
+}
+
+func (NodeDisruptionPolicySpecAction) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicySpecAction
+}
+
+var map_NodeDisruptionPolicySpecFile = map[string]string{
+ "": "NodeDisruptionPolicySpecFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object",
+ "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicySpecFile) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicySpecFile
+}
+
+var map_NodeDisruptionPolicySpecSSHKey = map[string]string{
+ "": "NodeDisruptionPolicySpecSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyConfig object",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicySpecSSHKey) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicySpecSSHKey
+}
+
+var map_NodeDisruptionPolicySpecUnit = map[string]string{
+ "": "NodeDisruptionPolicySpecUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyConfig object",
+ "name": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicySpecUnit) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicySpecUnit
+}
+
+// NOTE(review): fixed typos "supercede" -> "supersede", "atleast" -> "at least"
+// and "can up to" -> "can be up to" in the generated descriptions; mirror the
+// fixes in the upstream NodeDisruptionPolicyStatus* type comments.
+var map_NodeDisruptionPolicyStatus = map[string]string{
+ "clusterPolicies": "clusterPolicies is a merge of cluster default and user provided node disruption policies.",
+}
+
+func (NodeDisruptionPolicyStatus) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyStatus
+}
+
+var map_NodeDisruptionPolicyStatusAction = map[string]string{
+ "type": "type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. reload/restart requires a corresponding service target specified in the reload/restart field. Other values require no further configuration",
+ "reload": "reload specifies the service to reload, only valid if type is reload",
+ "restart": "restart specifies the service to restart, only valid if type is restart",
+}
+
+func (NodeDisruptionPolicyStatusAction) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyStatusAction
+}
+
+var map_NodeDisruptionPolicyStatusFile = map[string]string{
+ "": "NodeDisruptionPolicyStatusFile is a file entry and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object",
+ "path": "path is the location of a file being managed through a MachineConfig. The Actions in the policy will apply to changes to the file at this path.",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicyStatusFile) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyStatusFile
+}
+
+var map_NodeDisruptionPolicyStatusSSHKey = map[string]string{
+ "": "NodeDisruptionPolicyStatusSSHKey is actions to take for any SSHKey change and is used in the NodeDisruptionPolicyClusterStatus object",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicyStatusSSHKey) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyStatusSSHKey
+}
+
+var map_NodeDisruptionPolicyStatusUnit = map[string]string{
+ "": "NodeDisruptionPolicyStatusUnit is a systemd unit name and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus object",
+ "name": "name represents the service name of a systemd service managed through a MachineConfig Actions specified will be applied for changes to the named service. Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".",
+ "actions": "actions represents the series of commands to be executed on changes to the file at the corresponding file path. Actions will be applied in the order that they are set in this list. If there are other incoming changes to other MachineConfig entries in the same update that require a reboot, the reboot will supersede these actions. Valid actions are Reboot, Drain, Reload, DaemonReload and None. The Reboot action and the None action cannot be used in conjunction with any of the other actions. This list supports a maximum of 10 entries.",
+}
+
+func (NodeDisruptionPolicyStatusUnit) SwaggerDoc() map[string]string {
+ return map_NodeDisruptionPolicyStatusUnit
+}
+
+// NOTE(review): fixed typos "atleast" -> "at least" and "can up to" -> "can be
+// up to" in the service-name descriptions; mirror the fixes in the upstream
+// ReloadService/RestartService type comments.
+var map_PartialSelector = map[string]string{
+ "": "PartialSelector provides label selector(s) that can be used to match machine management resources.",
+ "machineResourceSelector": "machineResourceSelector is a label selector that can be used to select machine resources like MachineSets.",
+}
+
+func (PartialSelector) SwaggerDoc() map[string]string {
+ return map_PartialSelector
+}
+
+var map_ReloadService = map[string]string{
+ "": "ReloadService allows the user to specify the services to be reloaded",
+ "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be reloaded Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".",
+}
+
+func (ReloadService) SwaggerDoc() map[string]string {
+ return map_ReloadService
+}
+
+var map_RestartService = map[string]string{
+ "": "RestartService allows the user to specify the services to be restarted",
+ "serviceName": "serviceName is the full name (e.g. crio.service) of the service to be restarted Service names should be of the format ${NAME}${SERVICETYPE} and can be up to 255 characters long. ${NAME} must be at least 1 character long and can only consist of alphabets, digits, \":\", \"-\", \"_\", \".\", and \"\". ${SERVICETYPE} must be one of \".service\", \".socket\", \".device\", \".mount\", \".automount\", \".swap\", \".target\", \".path\", \".timer\", \".snapshot\", \".slice\" or \".scope\".",
+}
+
+func (RestartService) SwaggerDoc() map[string]string {
+ return map_RestartService
+}
+
+// Generated swagger doc maps for the cluster network operator API types
+// (additional networks, default network plugin, egress IP, flow export).
+var map_AdditionalNetworkDefinition = map[string]string{
+ "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.",
+ "type": "type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan",
+ "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.",
+ "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace.",
+ "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD",
+ "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan",
+}
+
+func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string {
+ return map_AdditionalNetworkDefinition
+}
+
+var map_ClusterNetworkEntry = map[string]string{
+ "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks",
+}
+
+func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
+ return map_ClusterNetworkEntry
+}
+
+var map_DefaultNetworkDefinition = map[string]string{
+ "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.",
+ "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw",
+ "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin",
+ "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.",
+}
+
+func (DefaultNetworkDefinition) SwaggerDoc() map[string]string {
+ return map_DefaultNetworkDefinition
+}
+
+var map_EgressIPConfig = map[string]string{
+ "": "EgressIPConfig defines the configuration knobs for egressip",
+ "reachabilityTotalTimeoutSeconds": "reachabilityTotalTimeout configures the EgressIP node reachability check total timeout in seconds. If the EgressIP node cannot be reached within this timeout, the node is declared down. Setting a large value may cause the EgressIP feature to react slowly to node changes. In particular, it may react slowly for EgressIP nodes that really have a genuine problem and are unreachable. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 1 second. A value of 0 disables the EgressIP node's reachability check.",
+}
+
+func (EgressIPConfig) SwaggerDoc() map[string]string {
+ return map_EgressIPConfig
+}
+
+var map_ExportNetworkFlows = map[string]string{
+ "netFlow": "netFlow defines the NetFlow configuration.",
+ "sFlow": "sFlow defines the SFlow configuration.",
+ "ipfix": "ipfix defines IPFIX configuration.",
+}
+
+func (ExportNetworkFlows) SwaggerDoc() map[string]string {
+ return map_ExportNetworkFlows
+}
+
+// NOTE(review): grammar fix ("configure is migrated" -> "configuration is
+// migrated", x3); mirror upstream in the FeaturesMigration type comments.
+var map_FeaturesMigration = map[string]string{
+ "egressIP": "egressIP specifies whether or not the Egress IP configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress IP configuration is migrated.",
+ "egressFirewall": "egressFirewall specifies whether or not the Egress Firewall configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and Egress Firewall configuration is migrated.",
+ "multicast": "multicast specifies whether or not the multicast configuration is migrated automatically when changing the cluster default network provider. If unset, this property defaults to 'true' and multicast configuration is migrated.",
+}
+
+func (FeaturesMigration) SwaggerDoc() map[string]string {
+ return map_FeaturesMigration
+}
+
+// Generated swagger doc maps for OVN-Kubernetes gateway, hybrid overlay,
+// IPAM, IPFIX and IPsec configuration types.
+var map_GatewayConfig = map[string]string{
+ "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides",
+ "routingViaHost": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.",
+ "ipForwarding": "IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".",
+ "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values.",
+ "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv6 for details of default values.",
+}
+
+func (GatewayConfig) SwaggerDoc() map[string]string {
+ return map_GatewayConfig
+}
+
+var map_HybridOverlayConfig = map[string]string{
+ "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.",
+ "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789",
+}
+
+func (HybridOverlayConfig) SwaggerDoc() map[string]string {
+ return map_HybridOverlayConfig
+}
+
+var map_IPAMConfig = map[string]string{
+ "": "IPAMConfig contains configurations for IPAM (IP Address Management)",
+ "type": "Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic",
+ "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic",
+}
+
+func (IPAMConfig) SwaggerDoc() map[string]string {
+ return map_IPAMConfig
+}
+
+var map_IPFIXConfig = map[string]string{
+ "collectors": "ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (IPFIXConfig) SwaggerDoc() map[string]string {
+ return map_IPFIXConfig
+}
+
+var map_IPsecConfig = map[string]string{
+ "mode": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.",
+}
+
+func (IPsecConfig) SwaggerDoc() map[string]string {
+ return map_IPsecConfig
+}
+
+// NOTE(review): fixed typo "paramaters" -> "parameters"; mirror upstream in
+// the IPv4GatewayConfig type comment before regeneration.
+var map_IPv4GatewayConfig = map[string]string{
+ "": "IPV4GatewayConfig holds the configuration parameters for IPV4 connections in the GatewayConfig for OVN-Kubernetes",
+ "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format",
+}
+
+func (IPv4GatewayConfig) SwaggerDoc() map[string]string {
+ return map_IPv4GatewayConfig
+}
+
+var map_IPv4OVNKubernetesConfig = map[string]string{
+ "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format",
+ "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The current default value is 100.64.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format",
+}
+
+func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string {
+ return map_IPv4OVNKubernetesConfig
+}
+
+var map_IPv6GatewayConfig = map[string]string{
+ "": "IPV6GatewayConfig holds the configuration paramaters for IPV6 connections in the GatewayConfig for OVN-Kubernetes",
+ "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted",
+}
+
+func (IPv6GatewayConfig) SwaggerDoc() map[string]string {
+ return map_IPv6GatewayConfig
+}
+
+var map_IPv6OVNKubernetesConfig = map[string]string{
+ "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accomadate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
+ "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/48 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
+}
+
+func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string {
+ return map_IPv6OVNKubernetesConfig
+}
+
+var map_MTUMigration = map[string]string{
+ "": "MTUMigration MTU contains infomation about MTU migration.",
+ "network": "network contains information about MTU migration for the default network. Migrations are only allowed to MTU values lower than the machine's uplink MTU by the minimum appropriate offset.",
+ "machine": "machine contains MTU migration configuration for the machine's uplink. Needs to be migrated along with the default network MTU unless the current uplink MTU already accommodates the default network MTU.",
+}
+
+func (MTUMigration) SwaggerDoc() map[string]string {
+ return map_MTUMigration
+}
+
+var map_MTUMigrationValues = map[string]string{
+ "": "MTUMigrationValues contains the values for a MTU migration.",
+ "to": "to is the MTU to migrate to.",
+ "from": "from is the MTU to migrate from.",
+}
+
+func (MTUMigrationValues) SwaggerDoc() map[string]string {
+ return map_MTUMigrationValues
+}
+
+var map_NetFlowConfig = map[string]string{
+ "collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (NetFlowConfig) SwaggerDoc() map[string]string {
+ return map_NetFlowConfig
+}
+
+var map_Network = map[string]string{
+ "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+ return map_Network
+}
+
+var map_NetworkList = map[string]string{
+ "": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+ return map_NetworkList
+}
+
+var map_NetworkMigration = map[string]string{
+ "": "NetworkMigration represents the cluster network configuration.",
+ "networkType": "networkType is the target type of network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. The supported values are OpenShiftSDN, OVNKubernetes",
+ "mtu": "mtu contains the MTU migration configuration. Set this to allow changing the MTU values for the default network. If unset, the operation of changing the MTU for the default network will be rejected.",
+ "features": "features contains the features migration configuration. Set this to migrate feature configuration when changing the cluster default network provider. if unset, the default operation is to migrate all the configuration of supported features.",
+ "mode": "mode indicates the mode of network migration. The supported values are \"Live\", \"Offline\" and omitted. A \"Live\" migration operation will not cause service interruption by migrating the CNI of each node one by one. The cluster network will work as normal during the network migration. An \"Offline\" migration operation will cause service interruption. During an \"Offline\" migration, two rounds of node reboots are required. The cluster network will be malfunctioning during the network migration. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default value is \"Offline\".",
+}
+
+func (NetworkMigration) SwaggerDoc() map[string]string {
+ return map_NetworkMigration
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec is the top-level network configuration object.",
+ "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.",
+ "serviceNetwork": "serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth.",
+ "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive",
+ "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.",
+ "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.",
+ "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.",
+ "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.",
+ "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks.",
+ "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.",
+ "exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.",
+ "migration": "migration enables and configures the cluster network migration. The migration procedure allows to change the network type and the MTU.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+ "": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+ return map_NetworkStatus
+}
+
+var map_OVNKubernetesConfig = map[string]string{
+ "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project",
+ "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400",
+ "genevePort": "geneve port is the UDP port to be used by geneve encapulation. Default is 6081",
+ "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.",
+ "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.",
+ "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.",
+ "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.",
+ "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16",
+ "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/48",
+ "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.",
+ "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.",
+ "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.",
+}
+
+func (OVNKubernetesConfig) SwaggerDoc() map[string]string {
+ return map_OVNKubernetesConfig
+}
+
+var map_OpenShiftSDNConfig = map[string]string{
+ "": "OpenShiftSDNConfig configures the three openshift-sdn plugins",
+ "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"",
+ "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.",
+ "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.",
+ "useExternalOpenvswitch": "useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6",
+ "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.",
+}
+
+func (OpenShiftSDNConfig) SwaggerDoc() map[string]string {
+ return map_OpenShiftSDNConfig
+}
+
+var map_PolicyAuditConfig = map[string]string{
+ "rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used.",
+ "maxFileSize": "maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB",
+ "maxLogFiles": "maxLogFiles specifies the maximum number of ACL_audit log files that can be present.",
+ "destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journdald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog The default is \"null\"",
+ "syslogFacility": "syslogFacility the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\"",
+}
+
+func (PolicyAuditConfig) SwaggerDoc() map[string]string {
+ return map_PolicyAuditConfig
+}
+
+var map_ProxyConfig = map[string]string{
+ "": "ProxyConfig defines the configuration knobs for kubeproxy All of these are optional and have sensible defaults",
+ "iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s",
+ "bindAddress": "The address to \"bind\" on Defaults to 0.0.0.0",
+ "proxyArguments": "Any additional arguments to pass to the kubeproxy process",
+}
+
+func (ProxyConfig) SwaggerDoc() map[string]string {
+ return map_ProxyConfig
+}
+
+var map_SFlowConfig = map[string]string{
+ "collectors": "sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (SFlowConfig) SwaggerDoc() map[string]string {
+ return map_SFlowConfig
+}
+
+var map_SimpleMacvlanConfig = map[string]string{
+ "": "SimpleMacvlanConfig contains configurations for macvlan interface.",
+ "master": "master is the host interface to create the macvlan interface from. If not specified, it will be default route interface",
+ "ipamConfig": "IPAMConfig configures IPAM module will be used for IP Address Management (IPAM).",
+ "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge",
+ "mtu": "mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value.",
+}
+
+func (SimpleMacvlanConfig) SwaggerDoc() map[string]string {
+ return map_SimpleMacvlanConfig
+}
+
+var map_StaticIPAMAddresses = map[string]string{
+ "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses",
+ "address": "Address is the IP address in CIDR format",
+ "gateway": "Gateway is IP inside of subnet to designate as the gateway",
+}
+
+func (StaticIPAMAddresses) SwaggerDoc() map[string]string {
+ return map_StaticIPAMAddresses
+}
+
+var map_StaticIPAMConfig = map[string]string{
+ "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)",
+ "addresses": "Addresses configures IP address for the interface",
+ "routes": "Routes configures IP routes for the interface",
+ "dns": "DNS configures DNS for the interface",
+}
+
+func (StaticIPAMConfig) SwaggerDoc() map[string]string {
+ return map_StaticIPAMConfig
+}
+
+var map_StaticIPAMDNS = map[string]string{
+ "": "StaticIPAMDNS provides DNS related information for static IPAM",
+ "nameservers": "Nameservers points DNS servers for IP lookup",
+ "domain": "Domain configures the domainname the local domain used for short hostname lookups",
+ "search": "Search configures priority ordered search domains for short hostname lookups",
+}
+
+func (StaticIPAMDNS) SwaggerDoc() map[string]string {
+ return map_StaticIPAMDNS
+}
+
+var map_StaticIPAMRoutes = map[string]string{
+ "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes",
+ "destination": "Destination points the IP route destination",
+ "gateway": "Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).",
+}
+
+func (StaticIPAMRoutes) SwaggerDoc() map[string]string {
+ return map_StaticIPAMRoutes
+}
+
+var map_OpenShiftAPIServer = map[string]string{
+ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the OpenShift API Server.",
+ "status": "status defines the observed status of the OpenShift API Server.",
+}
+
+func (OpenShiftAPIServer) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServer
+}
+
+var map_OpenShiftAPIServerList = map[string]string{
+ "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (OpenShiftAPIServerList) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServerList
+}
+
+var map_OpenShiftAPIServerStatus = map[string]string{
+ "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.",
+}
+
+func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServerStatus
+}
+
+var map_OpenShiftControllerManager = map[string]string{
+ "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (OpenShiftControllerManager) SwaggerDoc() map[string]string {
+ return map_OpenShiftControllerManager
+}
+
+var map_OpenShiftControllerManagerList = map[string]string{
+ "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string {
+ return map_OpenShiftControllerManagerList
+}
+
+var map_KubeScheduler = map[string]string{
+ "": "KubeScheduler provides information to configure an operator to manage scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler",
+ "status": "status is the most recently observed status of the Kubernetes Scheduler",
+}
+
+func (KubeScheduler) SwaggerDoc() map[string]string {
+ return map_KubeScheduler
+}
+
+var map_KubeSchedulerList = map[string]string{
+ "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (KubeSchedulerList) SwaggerDoc() map[string]string {
+ return map_KubeSchedulerList
+}
+
+var map_ServiceCA = map[string]string{
+ "": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (ServiceCA) SwaggerDoc() map[string]string {
+ return map_ServiceCA
+}
+
+var map_ServiceCAList = map[string]string{
+ "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (ServiceCAList) SwaggerDoc() map[string]string {
+ return map_ServiceCAList
+}
+
+var map_ServiceCatalogAPIServer = map[string]string{
+ "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string {
+ return map_ServiceCatalogAPIServer
+}
+
+var map_ServiceCatalogAPIServerList = map[string]string{
+ "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string {
+ return map_ServiceCatalogAPIServerList
+}
+
+var map_ServiceCatalogControllerManager = map[string]string{
+ "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string {
+ return map_ServiceCatalogControllerManager
+}
+
+var map_ServiceCatalogControllerManagerList = map[string]string{
+ "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string {
+ return map_ServiceCatalogControllerManagerList
+}
+
+var map_Storage = map[string]string{
+ "": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (Storage) SwaggerDoc() map[string]string {
+ return map_Storage
+}
+
+var map_StorageList = map[string]string{
+ "": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (StorageList) SwaggerDoc() map[string]string {
+ return map_StorageList
+}
+
+var map_StorageSpec = map[string]string{
+ "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.",
+ "vsphereStorageDriver": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.",
+}
+
+func (StorageSpec) SwaggerDoc() map[string]string {
+ return map_StorageSpec
+}
+
+var map_StorageStatus = map[string]string{
+ "": "StorageStatus defines the observed status of the cluster storage operator.",
+}
+
+func (StorageStatus) SwaggerDoc() map[string]string {
+ return map_StorageStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/Makefile b/vendor/github.com/openshift/api/operator/v1alpha1/Makefile
new file mode 100644
index 0000000000..9cf3483822
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="operator.openshift.io/v1alpha1"
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/doc.go b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go
new file mode 100644
index 0000000000..9d18719532
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go
@@ -0,0 +1,6 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=operator.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go
new file mode 100644
index 0000000000..0921431c05
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go
@@ -0,0 +1,45 @@
+package v1alpha1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "operator.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &GenericOperatorConfig{},
+ &ImageContentSourcePolicy{},
+ &ImageContentSourcePolicyList{},
+ &OLM{},
+ &OLMList{},
+ &EtcdBackup{},
+ &EtcdBackupList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go
new file mode 100644
index 0000000000..4d5a207e6a
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go
@@ -0,0 +1,204 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// DEPRECATED: Use v1.ManagementState instead
+type ManagementState string
+
+const (
+ // Managed means that the operator is actively managing its resources and trying to keep the component active
+ // DEPRECATED: Use v1.Managed instead
+ Managed ManagementState = "Managed"
+ // Unmanaged means that the operator is not taking any action related to the component
+ // DEPRECATED: Use v1.Unmanaged instead
+ Unmanaged ManagementState = "Unmanaged"
+ // Removed means that the operator is actively managing its resources and trying to remove all traces of the component
+ // DEPRECATED: Use v1.Removed instead
+ Removed ManagementState = "Removed"
+)
+
+// OperatorSpec contains common fields for an operator to need. It is intended to be anonymous included
+// inside of the Spec struct for you particular operator.
+// DEPRECATED: Use v1.OperatorSpec instead
+type OperatorSpec struct {
+ // managementState indicates whether and how the operator should manage the component
+ ManagementState ManagementState `json:"managementState"`
+
+ // imagePullSpec is the image to use for the component.
+ ImagePullSpec string `json:"imagePullSpec"`
+
+ // imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified,
+ // or IfNotPresent otherwise.
+ ImagePullPolicy string `json:"imagePullPolicy"`
+
+ // version is the desired state in major.minor.micro-patch. Usually patch is ignored.
+ Version string `json:"version"`
+
+ // logging contains glog parameters for the component pods. It's always a command line arg for the moment
+ Logging LoggingConfig `json:"logging,omitempty"`
+}
+
+// LoggingConfig holds information about configuring logging
+// DEPRECATED: Use v1.LogLevel instead
+type LoggingConfig struct {
+ // level is passed to glog.
+ Level int64 `json:"level"`
+
+ // vmodule is passed to glog.
+ Vmodule string `json:"vmodule"`
+}
+
+// DEPRECATED: Use v1.ConditionStatus instead
+type ConditionStatus string
+
+const (
+ // DEPRECATED: Use v1.ConditionTrue instead
+ ConditionTrue ConditionStatus = "True"
+ // DEPRECATED: Use v1.ConditionFalse instead
+ ConditionFalse ConditionStatus = "False"
+ // DEPRECATED: Use v1.ConditionUnknown instead
+ ConditionUnknown ConditionStatus = "Unknown"
+
+ // these conditions match the conditions for the ClusterOperator type.
+ // DEPRECATED: Use v1.OperatorStatusTypeAvailable instead
+ OperatorStatusTypeAvailable = "Available"
+ // DEPRECATED: Use v1.OperatorStatusTypeProgressing instead
+ OperatorStatusTypeProgressing = "Progressing"
+ // DEPRECATED: Use v1.OperatorStatusTypeDegraded instead
+ OperatorStatusTypeFailing = "Failing"
+
+ // DEPRECATED: Use v1.OperatorStatusTypeProgressing instead
+ OperatorStatusTypeMigrating = "Migrating"
+ // TODO this is going to be removed
+ // DEPRECATED: Use v1.OperatorStatusTypeAvailable instead
+ OperatorStatusTypeSyncSuccessful = "SyncSuccessful"
+)
+
+// OperatorCondition is just the standard condition fields.
+// DEPRECATED: Use v1.OperatorCondition instead
+type OperatorCondition struct {
+ Type string `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// VersionAvailability gives information about the synchronization and operational status of a particular version of the component
+// DEPRECATED: Use fields in v1.OperatorStatus instead
+type VersionAvailability struct {
+ // version is the level this availability applies to
+ Version string `json:"version"`
+ // updatedReplicas indicates how many replicas are at the desired state
+ UpdatedReplicas int32 `json:"updatedReplicas"`
+ // readyReplicas indicates how many replicas are ready and at the desired state
+ ReadyReplicas int32 `json:"readyReplicas"`
+ // errors indicates what failures are associated with the operator trying to manage this version
+ Errors []string `json:"errors"`
+ // generations allows an operator to track what the generation of "important" resources was the last time we updated them
+ Generations []GenerationHistory `json:"generations"`
+}
+
+// GenerationHistory keeps track of the generation for a given resource so that decisions about forced updated can be made.
+// DEPRECATED: Use fields in v1.GenerationStatus instead
+type GenerationHistory struct {
+ // group is the group of the thing you're tracking
+ Group string `json:"group"`
+ // resource is the resource type of the thing you're tracking
+ Resource string `json:"resource"`
+ // namespace is where the thing you're tracking is
+ Namespace string `json:"namespace"`
+ // name is the name of the thing you're tracking
+ Name string `json:"name"`
+ // lastGeneration is the last generation of the workload controller involved
+ LastGeneration int64 `json:"lastGeneration"`
+}
+
+// OperatorStatus contains common fields for an operator to need. It is intended to be anonymous included
+// inside of the Status struct for you particular operator.
+// DEPRECATED: Use v1.OperatorStatus instead
+type OperatorStatus struct {
+ // observedGeneration is the last generation change you've dealt with
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // conditions is a list of conditions and their status
+ Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+ // state indicates what the operator has observed to be its current operational status.
+ State ManagementState `json:"state,omitempty"`
+ // taskSummary is a high level summary of what the controller is currently attempting to do. It is high-level, human-readable
+ // and not guaranteed in any way. (I needed this for debugging and realized it made a great summary).
+ TaskSummary string `json:"taskSummary,omitempty"`
+
+ // currentVersionAvailability is availability information for the current version. If it is unmanged or removed, this doesn't exist.
+ CurrentAvailability *VersionAvailability `json:"currentVersionAvailability,omitempty"`
+ // targetVersionAvailability is availability information for the target version if we are migrating
+ TargetAvailability *VersionAvailability `json:"targetVersionAvailability,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GenericOperatorConfig provides information to configure an operator
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:internal
+type GenericOperatorConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ServingInfo is the HTTP serving information for the controller's endpoints
+ ServingInfo configv1.HTTPServingInfo `json:"servingInfo,omitempty"`
+
+ // leaderElection provides information to elect a leader. Only override this if you have a specific need
+ LeaderElection configv1.LeaderElection `json:"leaderElection,omitempty"`
+
+ // authentication allows configuration of authentication for the endpoints
+ Authentication DelegatedAuthentication `json:"authentication,omitempty"`
+ // authorization allows configuration of authentication for the endpoints
+ Authorization DelegatedAuthorization `json:"authorization,omitempty"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+ // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+ // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual
+// node status must be tracked.
+// DEPRECATED: Use v1.StaticPodOperatorStatus instead
+type StaticPodOperatorStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment
+ LatestAvailableDeploymentGeneration int32 `json:"latestAvailableDeploymentGeneration"`
+
+ // nodeStatuses track the deployment values and errors across individual nodes
+ NodeStatuses []NodeStatus `json:"nodeStatuses"`
+}
+
+// NodeStatus provides information about the current state of a particular node managed by this operator.
+// Deprecated: Use v1.NodeStatus instead
+type NodeStatus struct {
+ // nodeName is the name of the node
+ NodeName string `json:"nodeName"`
+
+ // currentDeploymentGeneration is the generation of the most recently successful deployment
+ CurrentDeploymentGeneration int32 `json:"currentDeploymentGeneration"`
+ // targetDeploymentGeneration is the generation of the deployment we're trying to apply
+ TargetDeploymentGeneration int32 `json:"targetDeploymentGeneration"`
+ // lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy.
+ LastFailedDeploymentGeneration int32 `json:"lastFailedDeploymentGeneration"`
+
+ // lastFailedDeploymentGenerationErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration
+ LastFailedDeploymentErrors []string `json:"lastFailedDeploymentErrors"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go
new file mode 100644
index 0000000000..2654f57008
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go
@@ -0,0 +1,106 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// # EtcdBackup provides configuration options and status for a one-time backup attempt of the etcd cluster
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=etcdbackups,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1482
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=etcd,operatorOrdering=01
+// +openshift:enable:FeatureGate=AutomatedEtcdBackup
+type EtcdBackup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec EtcdBackupSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +kubebuilder:validation:Optional
+ // +optional
+ Status EtcdBackupStatus `json:"status"`
+}
+
+type EtcdBackupSpec struct {
+ // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the
+ // etcd backup file would be saved
+ // The PVC itself must always be created in the "openshift-etcd" namespace
+ // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup.
+ // In the future this would be backups saved across the control-plane master nodes.
+ // +kubebuilder:validation:Optional
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="pvcName is immutable once set"
+ PVCName string `json:"pvcName"`
+}
+
+// +kubebuilder:validation:Optional
+type EtcdBackupStatus struct {
+ // conditions provide details on the status of the etcd backup job.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // backupJob is the reference to the Job that executes the backup.
+ // Optional
+ // +kubebuilder:validation:Optional
+ BackupJob *BackupJobReference `json:"backupJob"`
+}
+
+// BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup
+type BackupJobReference struct {
+
+ // namespace is the namespace of the Job.
+ // this is always expected to be "openshift-etcd" since the user provided PVC
+ // is also required to be in "openshift-etcd"
+ // Required
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern:=`^openshift-etcd$`
+ Namespace string `json:"namespace"`
+
+ // name is the name of the Job.
+ // Required
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+type BackupConditionReason string
+
+var (
+ // BackupPending is added to the EtcdBackupStatus Conditions when the etcd backup is pending.
+ BackupPending BackupConditionReason = "BackupPending"
+
+ // BackupCompleted is added to the EtcdBackupStatus Conditions when the etcd backup has completed.
+ BackupCompleted BackupConditionReason = "BackupCompleted"
+
+ // BackupFailed is added to the EtcdBackupStatus Conditions when the etcd backup has failed.
+ BackupFailed BackupConditionReason = "BackupFailed"
+
+ // BackupSkipped is added to the EtcdBackupStatus Conditions when the etcd backup has been skipped.
+ BackupSkipped BackupConditionReason = "BackupSkipped"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EtcdBackupList is a collection of items
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type EtcdBackupList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []EtcdBackup `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go
new file mode 100644
index 0000000000..6e14720dd3
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go
@@ -0,0 +1,84 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagecontentsourcepolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:compatibility-gen:level=4
+type ImageContentSourcePolicy struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageContentSourcePolicySpec `json:"spec"`
+}
+
+// ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD.
+type ImageContentSourcePolicySpec struct {
+ // repositoryDigestMirrors allows images referenced by image digests in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // Only image pull specifications that have an image digest will have this behavior applied
+ // to them - tags will continue to be pulled from the specified repository in the pull spec.
+ //
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // +optional
+ RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type ImageContentSourcePolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageContentSourcePolicy `json:"items"`
+}
+
+// RepositoryDigestMirrors holds cluster-wide information about how to handle mirros in the registries config.
+// Note: the mirrors only work when pulling the images that are referenced by their digests.
+type RepositoryDigestMirrors struct {
+ // source is the repository that users refer to, e.g. in image pull specifications.
+ // +required
+ Source string `json:"source"`
+ // mirrors is one or more repositories that may also contain the same images.
+ // The order of mirrors in this list is treated as the user's desired priority, while source
+ // is by default considered lower priority than all mirrors. Other cluster configuration,
+ // including (but not limited to) other repositoryDigestMirrors objects,
+ // may impact the exact order mirrors are contacted in, or some mirrors may be contacted
+ // in parallel, so this should be considered a preference rather than a guarantee of ordering.
+ // +optional
+ Mirrors []string `json:"mirrors"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go
new file mode 100644
index 0000000000..f29385b9fa
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go
@@ -0,0 +1,62 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OLM provides information to configure an operator to manage the OLM controllers
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=olms,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01
+// +openshift:enable:FeatureGate=NewOLM
+// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'"
+type OLM struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ //spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ Spec OLMSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status OLMStatus `json:"status"`
+}
+
+type OLMSpec struct {
+ operatorv1.OperatorSpec `json:",inline"`
+}
+
+type OLMStatus struct {
+ operatorv1.OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OLMList is a collection of items
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type OLMList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []OLM `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..08ef2811a5
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,562 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupJobReference) DeepCopyInto(out *BackupJobReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupJobReference.
+func (in *BackupJobReference) DeepCopy() *BackupJobReference {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupJobReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication.
+func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization.
+func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization {
+ if in == nil {
+ return nil
+ }
+ out := new(DelegatedAuthorization)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdBackup) DeepCopyInto(out *EtcdBackup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackup.
+func (in *EtcdBackup) DeepCopy() *EtcdBackup {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdBackup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EtcdBackup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdBackupList) DeepCopyInto(out *EtcdBackupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EtcdBackup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupList.
+func (in *EtcdBackupList) DeepCopy() *EtcdBackupList {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdBackupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EtcdBackupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupSpec.
+func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdBackupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EtcdBackupStatus) DeepCopyInto(out *EtcdBackupStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.BackupJob != nil {
+ in, out := &in.BackupJob, &out.BackupJob
+ *out = new(BackupJobReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupStatus.
+func (in *EtcdBackupStatus) DeepCopy() *EtcdBackupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EtcdBackupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenerationHistory) DeepCopyInto(out *GenerationHistory) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationHistory.
+func (in *GenerationHistory) DeepCopy() *GenerationHistory {
+ if in == nil {
+ return nil
+ }
+ out := new(GenerationHistory)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GenericOperatorConfig) DeepCopyInto(out *GenericOperatorConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ServingInfo.DeepCopyInto(&out.ServingInfo)
+ out.LeaderElection = in.LeaderElection
+ out.Authentication = in.Authentication
+ out.Authorization = in.Authorization
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericOperatorConfig.
+func (in *GenericOperatorConfig) DeepCopy() *GenericOperatorConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GenericOperatorConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GenericOperatorConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentSourcePolicy) DeepCopyInto(out *ImageContentSourcePolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicy.
+func (in *ImageContentSourcePolicy) DeepCopy() *ImageContentSourcePolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentSourcePolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentSourcePolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentSourcePolicyList) DeepCopyInto(out *ImageContentSourcePolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ImageContentSourcePolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicyList.
+func (in *ImageContentSourcePolicyList) DeepCopy() *ImageContentSourcePolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentSourcePolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageContentSourcePolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageContentSourcePolicySpec) DeepCopyInto(out *ImageContentSourcePolicySpec) {
+ *out = *in
+ if in.RepositoryDigestMirrors != nil {
+ in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors
+ *out = make([]RepositoryDigestMirrors, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicySpec.
+func (in *ImageContentSourcePolicySpec) DeepCopy() *ImageContentSourcePolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ImageContentSourcePolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoggingConfig) DeepCopyInto(out *LoggingConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfig.
+func (in *LoggingConfig) DeepCopy() *LoggingConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(LoggingConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+ *out = *in
+ if in.LastFailedDeploymentErrors != nil {
+ in, out := &in.LastFailedDeploymentErrors, &out.LastFailedDeploymentErrors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLM) DeepCopyInto(out *OLM) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM.
+func (in *OLM) DeepCopy() *OLM {
+ if in == nil {
+ return nil
+ }
+ out := new(OLM)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OLM) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMList) DeepCopyInto(out *OLMList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OLM, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList.
+func (in *OLMList) DeepCopy() *OLMList {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OLMList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMSpec) DeepCopyInto(out *OLMSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec.
+func (in *OLMSpec) DeepCopy() *OLMSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OLMStatus) DeepCopyInto(out *OLMStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus.
+func (in *OLMStatus) DeepCopy() *OLMStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OLMStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition.
+func (in *OperatorCondition) DeepCopy() *OperatorCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) {
+ *out = *in
+ out.Logging = in.Logging
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec.
+func (in *OperatorSpec) DeepCopy() *OperatorSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]OperatorCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.CurrentAvailability != nil {
+ in, out := &in.CurrentAvailability, &out.CurrentAvailability
+ *out = new(VersionAvailability)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TargetAvailability != nil {
+ in, out := &in.TargetAvailability, &out.TargetAvailability
+ *out = new(VersionAvailability)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus.
+func (in *OperatorStatus) DeepCopy() *OperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) {
+ *out = *in
+ if in.Mirrors != nil {
+ in, out := &in.Mirrors, &out.Mirrors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors.
+func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors {
+ if in == nil {
+ return nil
+ }
+ out := new(RepositoryDigestMirrors)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ if in.NodeStatuses != nil {
+ in, out := &in.NodeStatuses, &out.NodeStatuses
+ *out = make([]NodeStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus.
+func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticPodOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersionAvailability) DeepCopyInto(out *VersionAvailability) {
+ *out = *in
+ if in.Errors != nil {
+ in, out := &in.Errors, &out.Errors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Generations != nil {
+ in, out := &in.Generations, &out.Generations
+ *out = make([]GenerationHistory, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionAvailability.
+func (in *VersionAvailability) DeepCopy() *VersionAvailability {
+ if in == nil {
+ return nil
+ }
+ out := new(VersionAvailability)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..2b6cbef275
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,67 @@
+etcdbackups.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1482
+ CRDName: etcdbackups.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - AutomatedEtcdBackup
+ FilenameOperatorName: etcd
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: EtcdBackup
+ Labels: {}
+ PluralName: etcdbackups
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - AutomatedEtcdBackup
+ Version: v1alpha1
+
+imagecontentsourcepolicies.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: imagecontentsourcepolicies.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: ImageContentSourcePolicy
+ Labels: {}
+ PluralName: imagecontentsourcepolicies
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1alpha1
+
+olms.operator.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/1504
+ CRDName: olms.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates:
+ - NewOLM
+ FilenameOperatorName: operator-lifecycle-manager
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: operator.openshift.io
+ HasStatus: true
+ KindName: OLM
+ Labels: {}
+ PluralName: olms
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates:
+ - NewOLM
+ Version: v1alpha1
+
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..c8cce688f4
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,242 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DelegatedAuthentication = map[string]string{
+ "": "DelegatedAuthentication allows authentication to be disabled.",
+ "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
+}
+
+func (DelegatedAuthentication) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthentication
+}
+
+var map_DelegatedAuthorization = map[string]string{
+ "": "DelegatedAuthorization allows authorization to be disabled.",
+ "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
+}
+
+func (DelegatedAuthorization) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthorization
+}
+
+var map_GenerationHistory = map[string]string{
+ "": "GenerationHistory keeps track of the generation for a given resource so that decisions about forced updated can be made. DEPRECATED: Use fields in v1.GenerationStatus instead",
+ "group": "group is the group of the thing you're tracking",
+ "resource": "resource is the resource type of the thing you're tracking",
+ "namespace": "namespace is where the thing you're tracking is",
+ "name": "name is the name of the thing you're tracking",
+ "lastGeneration": "lastGeneration is the last generation of the workload controller involved",
+}
+
+func (GenerationHistory) SwaggerDoc() map[string]string {
+ return map_GenerationHistory
+}
+
+var map_GenericOperatorConfig = map[string]string{
+ "": "GenericOperatorConfig provides information to configure an operator\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+ "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
+ "authentication": "authentication allows configuration of authentication for the endpoints",
+ "authorization": "authorization allows configuration of authentication for the endpoints",
+}
+
+func (GenericOperatorConfig) SwaggerDoc() map[string]string {
+ return map_GenericOperatorConfig
+}
+
+var map_LoggingConfig = map[string]string{
+ "": "LoggingConfig holds information about configuring logging DEPRECATED: Use v1.LogLevel instead",
+ "level": "level is passed to glog.",
+ "vmodule": "vmodule is passed to glog.",
+}
+
+func (LoggingConfig) SwaggerDoc() map[string]string {
+ return map_LoggingConfig
+}
+
+var map_NodeStatus = map[string]string{
+ "": "NodeStatus provides information about the current state of a particular node managed by this operator. Deprecated: Use v1.NodeStatus instead",
+ "nodeName": "nodeName is the name of the node",
+ "currentDeploymentGeneration": "currentDeploymentGeneration is the generation of the most recently successful deployment",
+ "targetDeploymentGeneration": "targetDeploymentGeneration is the generation of the deployment we're trying to apply",
+ "lastFailedDeploymentGeneration": "lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy.",
+ "lastFailedDeploymentErrors": "lastFailedDeploymentGenerationErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+ return map_NodeStatus
+}
+
+var map_OperatorCondition = map[string]string{
+ "": "OperatorCondition is just the standard condition fields. DEPRECATED: Use v1.OperatorCondition instead",
+}
+
+func (OperatorCondition) SwaggerDoc() map[string]string {
+ return map_OperatorCondition
+}
+
+var map_OperatorSpec = map[string]string{
+ "": "OperatorSpec contains common fields for an operator to need. It is intended to be anonymous included inside of the Spec struct for you particular operator. DEPRECATED: Use v1.OperatorSpec instead",
+ "managementState": "managementState indicates whether and how the operator should manage the component",
+ "imagePullSpec": "imagePullSpec is the image to use for the component.",
+ "imagePullPolicy": "imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
+ "version": "version is the desired state in major.minor.micro-patch. Usually patch is ignored.",
+ "logging": "logging contains glog parameters for the component pods. It's always a command line arg for the moment",
+}
+
+func (OperatorSpec) SwaggerDoc() map[string]string {
+ return map_OperatorSpec
+}
+
+var map_OperatorStatus = map[string]string{
+ "": "OperatorStatus contains common fields for an operator to need. It is intended to be anonymous included inside of the Status struct for you particular operator. DEPRECATED: Use v1.OperatorStatus instead",
+ "observedGeneration": "observedGeneration is the last generation change you've dealt with",
+ "conditions": "conditions is a list of conditions and their status",
+ "state": "state indicates what the operator has observed to be its current operational status.",
+ "taskSummary": "taskSummary is a high level summary of what the controller is currently attempting to do. It is high-level, human-readable and not guaranteed in any way. (I needed this for debugging and realized it made a great summary).",
+ "currentVersionAvailability": "currentVersionAvailability is availability information for the current version. If it is unmanged or removed, this doesn't exist.",
+ "targetVersionAvailability": "targetVersionAvailability is availability information for the target version if we are migrating",
+}
+
+func (OperatorStatus) SwaggerDoc() map[string]string {
+ return map_OperatorStatus
+}
+
+var map_StaticPodOperatorStatus = map[string]string{
+ "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked. DEPRECATED: Use v1.StaticPodOperatorStatus instead",
+ "latestAvailableDeploymentGeneration": "latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment",
+ "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes",
+}
+
+func (StaticPodOperatorStatus) SwaggerDoc() map[string]string {
+ return map_StaticPodOperatorStatus
+}
+
+var map_VersionAvailability = map[string]string{
+ "": "VersionAvailability gives information about the synchronization and operational status of a particular version of the component DEPRECATED: Use fields in v1.OperatorStatus instead",
+ "version": "version is the level this availability applies to",
+ "updatedReplicas": "updatedReplicas indicates how many replicas are at the desired state",
+ "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state",
+ "errors": "errors indicates what failures are associated with the operator trying to manage this version",
+ "generations": "generations allows an operator to track what the generation of \"important\" resources was the last time we updated them",
+}
+
+func (VersionAvailability) SwaggerDoc() map[string]string {
+ return map_VersionAvailability
+}
+
+var map_BackupJobReference = map[string]string{
+ "": "BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup",
+ "namespace": "namespace is the namespace of the Job. this is always expected to be \"openshift-etcd\" since the user provided PVC is also required to be in \"openshift-etcd\" Required",
+ "name": "name is the name of the Job. Required",
+}
+
+func (BackupJobReference) SwaggerDoc() map[string]string {
+ return map_BackupJobReference
+}
+
+var map_EtcdBackup = map[string]string{
+ "": "\n\n# EtcdBackup provides configuration options and status for a one-time backup attempt of the etcd cluster\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (EtcdBackup) SwaggerDoc() map[string]string {
+ return map_EtcdBackup
+}
+
+var map_EtcdBackupList = map[string]string{
+ "": "EtcdBackupList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (EtcdBackupList) SwaggerDoc() map[string]string {
+ return map_EtcdBackupList
+}
+
+var map_EtcdBackupSpec = map[string]string{
+ "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.",
+}
+
+func (EtcdBackupSpec) SwaggerDoc() map[string]string {
+ return map_EtcdBackupSpec
+}
+
+var map_EtcdBackupStatus = map[string]string{
+ "conditions": "conditions provide details on the status of the etcd backup job.",
+ "backupJob": "backupJob is the reference to the Job that executes the backup. Optional",
+}
+
+func (EtcdBackupStatus) SwaggerDoc() map[string]string {
+ return map_EtcdBackupStatus
+}
+
+var map_ImageContentSourcePolicy = map[string]string{
+ "": "ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+}
+
+func (ImageContentSourcePolicy) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicy
+}
+
+var map_ImageContentSourcePolicyList = map[string]string{
+ "": "ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ImageContentSourcePolicyList) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicyList
+}
+
+var map_ImageContentSourcePolicySpec = map[string]string{
+ "": "ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD.",
+ "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. Only image pull specifications that have an image digest will have this behavior applied to them - tags will continue to be pulled from the specified repository in the pull spec.\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.",
+}
+
+func (ImageContentSourcePolicySpec) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicySpec
+}
+
+var map_RepositoryDigestMirrors = map[string]string{
+ "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirros in the registries config. Note: the mirrors only work when pulling the images that are referenced by their digests.",
+ "source": "source is the repository that users refer to, e.g. in image pull specifications.",
+ "mirrors": "mirrors is one or more repositories that may also contain the same images. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.",
+}
+
+func (RepositoryDigestMirrors) SwaggerDoc() map[string]string {
+ return map_RepositoryDigestMirrors
+}
+
+var map_OLM = map[string]string{
+ "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec holds user settable values for configuration",
+ "status": "status holds observed values from the cluster. They may not be overridden.",
+}
+
+func (OLM) SwaggerDoc() map[string]string {
+ return map_OLM
+}
+
+var map_OLMList = map[string]string{
+ "": "OLMList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (OLMList) SwaggerDoc() map[string]string {
+ return map_OLMList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/install.go b/vendor/github.com/openshift/api/operatorcontrolplane/install.go
new file mode 100644
index 0000000000..8e8abd0abf
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/install.go
@@ -0,0 +1,26 @@
+package operatorcontrolplane
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/openshift/api/operatorcontrolplane/v1alpha1"
+)
+
+const (
+ GroupName = "controlplane.operator.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile
new file mode 100644
index 0000000000..11371b1262
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="controlplane.operator.openshift.io/v1alpha1"
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go
new file mode 100644
index 0000000000..73f55856a8
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=controlplane.operator.openshift.io
+
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go
new file mode 100644
index 0000000000..1ffc55381c
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go
@@ -0,0 +1,39 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "controlplane.operator.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &PodNetworkConnectivityCheck{},
+ &PodNetworkConnectivityCheckList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go
new file mode 100644
index 0000000000..f4b48e8545
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go
@@ -0,0 +1,198 @@
+// Package v1alpha1 is an API version in the controlplane.operator.openshift.io group
+package v1alpha1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodNetworkConnectivityCheck
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=podnetworkconnectivitychecks,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/639
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=network,operatorOrdering=01
+// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true
+// +openshift:compatibility-gen:level=4
+type PodNetworkConnectivityCheck struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec defines the source and target of the connectivity check
+ // +kubebuilder:validation:Required
+ // +required
+ Spec PodNetworkConnectivityCheckSpec `json:"spec"`
+
+ // Status contains the observed status of the connectivity check
+ // +optional
+ Status PodNetworkConnectivityCheckStatus `json:"status,omitempty"`
+}
+
+type PodNetworkConnectivityCheckSpec struct {
+ // SourcePod names the pod from which the condition will be checked
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
+ // +required
+ SourcePod string `json:"sourcePod"`
+
+ // EndpointAddress to check. A TCP address of the form host:port. Note that
+ // if host is a DNS name, then the check would fail if the DNS name cannot
+ // be resolved. Specify an IP address for host to bypass DNS name lookup.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^\S+:\d*$`
+ // +required
+ TargetEndpoint string `json:"targetEndpoint"`
+
+ // TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and
+ // 'tls.key' entries containing an optional TLS client certificate and key to be used when
+ // checking endpoints that require a client certificate in order to gracefully preform the
+ // scan without causing excessive logging in the endpoint process. The secret must exist in
+ // the same namespace as this resource.
+ // +optional
+ TLSClientCert v1.SecretNameReference `json:"tlsClientCert,omitempty"`
+}
+
+// +k8s:deepcopy-gen=true
+type PodNetworkConnectivityCheckStatus struct {
+ // Successes contains logs successful check actions
+ // +optional
+ Successes []LogEntry `json:"successes,omitempty"`
+
+ // Failures contains logs of unsuccessful check actions
+ // +optional
+ Failures []LogEntry `json:"failures,omitempty"`
+
+ // Outages contains logs of time periods of outages
+ // +optional
+ Outages []OutageEntry `json:"outages,omitempty"`
+
+ // Conditions summarize the status of the check
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +optional
+ Conditions []PodNetworkConnectivityCheckCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// LogEntry records events
+type LogEntry struct {
+ // Start time of check action.
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ Start metav1.Time `json:"time"`
+
+ // Success indicates if the log entry indicates a success or failure.
+ // +kubebuilder:validation:Required
+ // +required
+ Success bool `json:"success"`
+
+ // Reason for status in a machine readable format.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // Message explaining status in a human readable format.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // Latency records how long the action mentioned in the entry took.
+ // +optional
+ // +nullable
+ Latency metav1.Duration `json:"latency,omitempty"`
+}
+
+// OutageEntry records time period of an outage
+type OutageEntry struct {
+
+ // Start of outage detected
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ Start metav1.Time `json:"start"`
+
+ // End of outage detected
+ // +optional
+ // +nullable
+ End metav1.Time `json:"end,omitempty"`
+
+ // StartLogs contains log entries related to the start of this outage. Should contain
+ // the original failure, any entries where the failure mode changed.
+ // +optional
+ StartLogs []LogEntry `json:"startLogs,omitempty"`
+
+ // EndLogs contains log entries related to the end of this outage. Should contain the success
+ // entry that resolved the outage and possibly a few of the failure log entries that preceded it.
+ // +optional
+ EndLogs []LogEntry `json:"endLogs,omitempty"`
+
+ // Message summarizes outage details in a human readable format.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity.
+// +k8s:deepcopy-gen=true
+type PodNetworkConnectivityCheckCondition struct {
+
+ // Type of the condition
+ // +kubebuilder:validation:Required
+ // +required
+ Type PodNetworkConnectivityCheckConditionType `json:"type"`
+
+ // Status of the condition
+ // +kubebuilder:validation:Required
+ // +required
+ Status metav1.ConditionStatus `json:"status"`
+
+ // Reason for the condition's last status transition in a machine readable format.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+
+ // Message indicating details about last transition in a human readable format.
+ // +optional
+ Message string `json:"message,omitempty"`
+
+ // Last time the condition transitioned from one status to another.
+ // +kubebuilder:validation:Required
+ // +required
+ // +nullable
+ LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+}
+
+const (
+ LogEntryReasonDNSResolve = "DNSResolve"
+ LogEntryReasonDNSError = "DNSError"
+ LogEntryReasonTCPConnect = "TCPConnect"
+ LogEntryReasonTCPConnectError = "TCPConnectError"
+)
+
+type PodNetworkConnectivityCheckConditionType string
+
+const (
+ // Reachable indicates that the endpoint was reachable from the pod.
+ Reachable PodNetworkConnectivityCheckConditionType = "Reachable"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type PodNetworkConnectivityCheckList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+
+ // Items contains the items
+ Items []PodNetworkConnectivityCheck `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..26431d8c1b
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,199 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LogEntry) DeepCopyInto(out *LogEntry) {
+ *out = *in
+ in.Start.DeepCopyInto(&out.Start)
+ out.Latency = in.Latency
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogEntry.
+func (in *LogEntry) DeepCopy() *LogEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(LogEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutageEntry) DeepCopyInto(out *OutageEntry) {
+ *out = *in
+ in.Start.DeepCopyInto(&out.Start)
+ in.End.DeepCopyInto(&out.End)
+ if in.StartLogs != nil {
+ in, out := &in.StartLogs, &out.StartLogs
+ *out = make([]LogEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.EndLogs != nil {
+ in, out := &in.EndLogs, &out.EndLogs
+ *out = make([]LogEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageEntry.
+func (in *OutageEntry) DeepCopy() *OutageEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(OutageEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodNetworkConnectivityCheck) DeepCopyInto(out *PodNetworkConnectivityCheck) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheck.
+func (in *PodNetworkConnectivityCheck) DeepCopy() *PodNetworkConnectivityCheck {
+ if in == nil {
+ return nil
+ }
+ out := new(PodNetworkConnectivityCheck)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodNetworkConnectivityCheck) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodNetworkConnectivityCheckCondition) DeepCopyInto(out *PodNetworkConnectivityCheckCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckCondition.
+func (in *PodNetworkConnectivityCheckCondition) DeepCopy() *PodNetworkConnectivityCheckCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(PodNetworkConnectivityCheckCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodNetworkConnectivityCheckList) DeepCopyInto(out *PodNetworkConnectivityCheckList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodNetworkConnectivityCheck, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckList.
+func (in *PodNetworkConnectivityCheckList) DeepCopy() *PodNetworkConnectivityCheckList {
+ if in == nil {
+ return nil
+ }
+ out := new(PodNetworkConnectivityCheckList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodNetworkConnectivityCheckList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodNetworkConnectivityCheckSpec) DeepCopyInto(out *PodNetworkConnectivityCheckSpec) {
+ *out = *in
+ out.TLSClientCert = in.TLSClientCert
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckSpec.
+func (in *PodNetworkConnectivityCheckSpec) DeepCopy() *PodNetworkConnectivityCheckSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PodNetworkConnectivityCheckSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodNetworkConnectivityCheckStatus) DeepCopyInto(out *PodNetworkConnectivityCheckStatus) {
+ *out = *in
+ if in.Successes != nil {
+ in, out := &in.Successes, &out.Successes
+ *out = make([]LogEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Failures != nil {
+ in, out := &in.Failures, &out.Failures
+ *out = make([]LogEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Outages != nil {
+ in, out := &in.Outages, &out.Outages
+ *out = make([]OutageEntry, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]PodNetworkConnectivityCheckCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckStatus.
+func (in *PodNetworkConnectivityCheckStatus) DeepCopy() *PodNetworkConnectivityCheckStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PodNetworkConnectivityCheckStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..2032118c9c
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,22 @@
+podnetworkconnectivitychecks.controlplane.operator.openshift.io:
+ Annotations:
+ include.release.openshift.io/self-managed-high-availability: "true"
+ ApprovedPRNumber: https://github.com/openshift/api/pull/639
+ CRDName: podnetworkconnectivitychecks.controlplane.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: network
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_10"
+ GroupName: controlplane.operator.openshift.io
+ HasStatus: true
+ KindName: PodNetworkConnectivityCheck
+ Labels: {}
+ PluralName: podnetworkconnectivitychecks
+ PrinterColumns: []
+ Scope: Namespaced
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1alpha1
+
diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..5ecc5e48af
--- /dev/null
+++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,95 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_LogEntry = map[string]string{
+ "": "LogEntry records events",
+ "time": "Start time of check action.",
+ "success": "Success indicates if the log entry indicates a success or failure.",
+ "reason": "Reason for status in a machine readable format.",
+ "message": "Message explaining status in a human readable format.",
+ "latency": "Latency records how long the action mentioned in the entry took.",
+}
+
+func (LogEntry) SwaggerDoc() map[string]string {
+ return map_LogEntry
+}
+
+var map_OutageEntry = map[string]string{
+ "": "OutageEntry records time period of an outage",
+ "start": "Start of outage detected",
+ "end": "End of outage detected",
+ "startLogs": "StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed.",
+ "endLogs": "EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.",
+ "message": "Message summarizes outage details in a human readable format.",
+}
+
+func (OutageEntry) SwaggerDoc() map[string]string {
+ return map_OutageEntry
+}
+
+var map_PodNetworkConnectivityCheck = map[string]string{
+ "": "PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the source and target of the connectivity check",
+ "status": "Status contains the observed status of the connectivity check",
+}
+
+func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string {
+ return map_PodNetworkConnectivityCheck
+}
+
+var map_PodNetworkConnectivityCheckCondition = map[string]string{
+ "": "PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity.",
+ "type": "Type of the condition",
+ "status": "Status of the condition",
+ "reason": "Reason for the condition's last status transition in a machine readable format.",
+ "message": "Message indicating details about last transition in a human readable format.",
+ "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+}
+
+func (PodNetworkConnectivityCheckCondition) SwaggerDoc() map[string]string {
+ return map_PodNetworkConnectivityCheckCondition
+}
+
+var map_PodNetworkConnectivityCheckList = map[string]string{
+ "": "PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string {
+ return map_PodNetworkConnectivityCheckList
+}
+
+var map_PodNetworkConnectivityCheckSpec = map[string]string{
+ "sourcePod": "SourcePod names the pod from which the condition will be checked",
+ "targetEndpoint": "EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup.",
+ "tlsClientCert": "TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource.",
+}
+
+func (PodNetworkConnectivityCheckSpec) SwaggerDoc() map[string]string {
+ return map_PodNetworkConnectivityCheckSpec
+}
+
+var map_PodNetworkConnectivityCheckStatus = map[string]string{
+ "successes": "Successes contains logs successful check actions",
+ "failures": "Failures contains logs of unsuccessful check actions",
+ "outages": "Outages contains logs of time periods of outages",
+ "conditions": "Conditions summarize the status of the check",
+}
+
+func (PodNetworkConnectivityCheckStatus) SwaggerDoc() map[string]string {
+ return map_PodNetworkConnectivityCheckStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/osin/install.go b/vendor/github.com/openshift/api/osin/install.go
new file mode 100644
index 0000000000..3f773985b4
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/install.go
@@ -0,0 +1,26 @@
+package osin
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ osinv1 "github.com/openshift/api/osin/v1"
+)
+
+const (
+ GroupName = "osin.config.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(osinv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/osin/v1/doc.go b/vendor/github.com/openshift/api/osin/v1/doc.go
new file mode 100644
index 0000000000..b74dfc48ad
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=osin.config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/osin/v1/register.go b/vendor/github.com/openshift/api/osin/v1/register.go
new file mode 100644
index 0000000000..4d54a5df40
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/register.go
@@ -0,0 +1,50 @@
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "osin.config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &OsinServerConfig{},
+
+ &BasicAuthPasswordIdentityProvider{},
+ &AllowAllPasswordIdentityProvider{},
+ &DenyAllPasswordIdentityProvider{},
+ &HTPasswdPasswordIdentityProvider{},
+ &LDAPPasswordIdentityProvider{},
+ &KeystonePasswordIdentityProvider{},
+ &RequestHeaderIdentityProvider{},
+ &GitHubIdentityProvider{},
+ &GitLabIdentityProvider{},
+ &GoogleIdentityProvider{},
+ &OpenIDIdentityProvider{},
+
+ &SessionSecrets{},
+ )
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go
new file mode 100644
index 0000000000..0ea4be1ba0
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/types.go
@@ -0,0 +1,488 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OsinServerConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // provides the standard apiserver configuration
+ configv1.GenericAPIServerConfig `json:",inline"`
+
+ // oauthConfig holds the necessary configuration options for OAuth authentication
+ OAuthConfig OAuthConfig `json:"oauthConfig"`
+}
+
+// OAuthConfig holds the necessary configuration options for OAuth authentication
+type OAuthConfig struct {
+ // masterCA is the CA for verifying the TLS connection back to the MasterURL.
+ // This field is deprecated and will be removed in a future release.
+ // See loginURL for details.
+ // Deprecated
+ MasterCA *string `json:"masterCA"`
+
+ // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens
+ // This field is deprecated and will be removed in a future release.
+ // See loginURL for details.
+ // Deprecated
+ MasterURL string `json:"masterURL"`
+
+ // masterPublicURL is used for building valid client redirect URLs for internal and external access
+ // This field is deprecated and will be removed in a future release.
+ // See loginURL for details.
+ // Deprecated
+ MasterPublicURL string `json:"masterPublicURL"`
+
+ // loginURL, along with masterCA, masterURL and masterPublicURL have distinct
+ // meanings depending on how the OAuth server is run. The two states are:
+ // 1. embedded in the kube api server (all 3.x releases)
+ // 2. as a standalone external process (all 4.x releases)
+ // in the embedded configuration, loginURL is equivalent to masterPublicURL
+ // and the other fields have functionality that matches their docs.
+ // in the standalone configuration, the fields are used as:
+ // loginURL is the URL required to login to the cluster:
+ // oc login --server=
+ // masterPublicURL is the issuer URL
+ // it is accessible from inside (service network) and outside (ingress) of the cluster
+ // masterURL is the loopback variation of the token_endpoint URL with no path component
+ // it is only accessible from inside (service network) of the cluster
+ // masterCA is used to perform TLS verification for connections made to masterURL
+ // For further details, see the IETF Draft:
+ // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+ LoginURL string `json:"loginURL"`
+
+ // assetPublicURL is used for building valid client redirect URLs for external access
+ AssetPublicURL string `json:"assetPublicURL"`
+
+ // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.
+ AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"`
+
+ //identityProviders is an ordered list of ways for a user to identify themselves
+ IdentityProviders []IdentityProvider `json:"identityProviders"`
+
+ // grantConfig describes how to handle grants
+ GrantConfig GrantConfig `json:"grantConfig"`
+
+ // sessionConfig hold information about configuring sessions.
+ SessionConfig *SessionConfig `json:"sessionConfig"`
+
+ // tokenConfig contains options for authorization and access tokens
+ TokenConfig TokenConfig `json:"tokenConfig"`
+
+ // templates allow you to customize pages like the login page.
+ Templates *OAuthTemplates `json:"templates"`
+}
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+ // login is a path to a file containing a go template used to render the login page.
+ // If unspecified, the default login page is used.
+ Login string `json:"login"`
+
+ // providerSelection is a path to a file containing a go template used to render the provider selection page.
+ // If unspecified, the default provider selection page is used.
+ ProviderSelection string `json:"providerSelection"`
+
+ // error is a path to a file containing a go template used to render error pages during the authentication or grant flow
+ // If unspecified, the default error page is used.
+ Error string `json:"error"`
+}
+
+// IdentityProvider provides identities for users authenticating using credentials
+type IdentityProvider struct {
+ // name is used to qualify the identities returned by this provider
+ Name string `json:"name"`
+ // challenge indicates whether to issue WWW-Authenticate challenges for this provider
+ UseAsChallenger bool `json:"challenge"`
+ // login indicates whether to use this identity provider for unauthenticated browsers to login against
+ UseAsLogin bool `json:"login"`
+ // mappingMethod determines how identities from this provider are mapped to users
+ MappingMethod string `json:"mappingMethod"`
+ // provider contains the information about how to set up a specific identity provider
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Provider runtime.RawExtension `json:"provider"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BasicAuthPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // RemoteConnectionInfo contains information about how to connect to the external basic auth server
+ configv1.RemoteConnectionInfo `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type AllowAllPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DenyAllPasswordIdentityProvider provides no identities for users
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type DenyAllPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type HTPasswdPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // file is a reference to your htpasswd file
+ File string `json:"file"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type LDAPPasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ // url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+ // bindDN is an optional DN to bind with during the search phase.
+ BindDN string `json:"bindDN"`
+ // bindPassword is an optional password to bind with during the search phase.
+ BindPassword configv1.StringSource `json:"bindPassword"`
+
+ // insecure, if true, indicates the connection should not use TLS.
+ // Cannot be set to true with a URL scheme of "ldaps://"
+ // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830
+ Insecure bool `json:"insecure"`
+ // ca is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+ // attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // id is the list of attributes whose values should be used as the user ID. Required.
+ // LDAP standard identity attribute is "dn"
+ ID []string `json:"id"`
+ // preferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ PreferredUsername []string `json:"preferredUsername"`
+ // name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ Name []string `json:"name"`
+ // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ Email []string `json:"email"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KeystonePasswordIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+ // RemoteConnectionInfo contains information about how to connect to the keystone server
+ configv1.RemoteConnectionInfo `json:",inline"`
+ // domainName is required for keystone v3
+ DomainName string `json:"domainName"`
+ // useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username
+ UseKeystoneIdentity bool `json:"useKeystoneIdentity"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type RequestHeaderIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // loginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ LoginURL string `json:"loginURL"`
+
+ // challengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ ChallengeURL string `json:"challengeURL"`
+
+ // clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.
+ ClientCA string `json:"clientCA"`
+ // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.
+ ClientCommonNames []string `json:"clientCommonNames"`
+
+ // headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+ // preferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+ // nameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+ // emailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GitHubIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // clientSecret is the oauth client secret
+ ClientSecret configv1.StringSource `json:"clientSecret"`
+ // organizations optionally restricts which organizations are allowed to log in
+ Organizations []string `json:"organizations"`
+ // teams optionally restricts which teams are allowed to log in. Format is /.
+ Teams []string `json:"teams"`
+ // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.
+ Hostname string `json:"hostname"`
+ // ca is the optional trusted certificate authority bundle to use when making requests to the server.
+ // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.
+ CA string `json:"ca"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GitLabIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ca is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+ // url is the oauth server base URL
+ URL string `json:"url"`
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // clientSecret is the oauth client secret
+ ClientSecret configv1.StringSource `json:"clientSecret"`
+ // legacy determines if OAuth2 or OIDC should be used
+ // If true, OAuth2 is used
+ // If false, OIDC is used
+ // If nil and the URL's host is gitlab.com, OIDC is used
+ // Otherwise, OAuth2 is used
+ // In a future release, nil will default to using OIDC
+ // Eventually this flag will be removed and only OIDC will be used
+ Legacy *bool `json:"legacy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GoogleIdentityProvider provides identities for users authenticating using Google credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type GoogleIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // clientSecret is the oauth client secret
+ ClientSecret configv1.StringSource `json:"clientSecret"`
+
+ // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to
+ HostedDomain string `json:"hostedDomain"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OpenIDIdentityProvider struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ca is the optional trusted certificate authority bundle to use when making requests to the server
+ // If empty, the default system roots are used
+ CA string `json:"ca"`
+
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+ // clientSecret is the oauth client secret
+ ClientSecret configv1.StringSource `json:"clientSecret"`
+
+ // extraScopes are any scopes to request in addition to the standard "openid" scope.
+ ExtraScopes []string `json:"extraScopes"`
+
+ // extraAuthorizeParameters are any custom parameters to add to the authorize request.
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"`
+
+ // urls to use to authenticate
+ URLs OpenIDURLs `json:"urls"`
+
+ // claims mappings
+ Claims OpenIDClaims `json:"claims"`
+}
+
+// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider
+type OpenIDURLs struct {
+ // authorize is the oauth authorization URL
+ Authorize string `json:"authorize"`
+ // token is the oauth token granting URL
+ Token string `json:"token"`
+ // userInfo is the optional userinfo URL.
+ // If present, a granted access_token is used to request claims
+ // If empty, a granted id_token is parsed for claims
+ UserInfo string `json:"userInfo"`
+}
+
+// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider
+type OpenIDClaims struct {
+ // id is the list of claims whose values should be used as the user ID. Required.
+ // OpenID standard identity claim is "sub"
+ ID []string `json:"id"`
+ // preferredUsername is the list of claims whose values should be used as the preferred username.
+ // If unspecified, the preferred username is determined from the value of the id claim
+ PreferredUsername []string `json:"preferredUsername"`
+ // name is the list of claims whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ Name []string `json:"name"`
+ // email is the list of claims whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ Email []string `json:"email"`
+ // groups is the list of claims value of which should be used to synchronize groups
+ // from the OIDC provider to OpenShift for the user
+ Groups []string `json:"groups"`
+}
+
+// GrantConfig holds the necessary configuration options for grant handlers
+type GrantConfig struct {
+ // method determines the default strategy to use when an OAuth client requests a grant.
+ // This method will be used only if the specific OAuth client doesn't provide a strategy
+ // of their own. Valid grant handling methods are:
+ // - auto: always approves grant requests, useful for trusted clients
+ // - prompt: prompts the end user for approval of grant requests, useful for third-party clients
+ // - deny: always denies grant requests, useful for black-listed clients
+ Method GrantHandlerType `json:"method"`
+
+ // serviceAccountMethod is used for determining client authorization for service account oauth client.
+ // It must be either: deny, prompt
+ ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"`
+}
+
+type GrantHandlerType string
+
+const (
+ // auto auto-approves client authorization grant requests
+ GrantHandlerAuto GrantHandlerType = "auto"
+ // prompt prompts the user to approve new client authorization grant requests
+ GrantHandlerPrompt GrantHandlerType = "prompt"
+ // deny auto-denies client authorization grant requests
+ GrantHandlerDeny GrantHandlerType = "deny"
+)
+
+// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession
+type SessionConfig struct {
+ // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object
+ // If no file is specified, a random signing and encryption key are generated at each server start
+ SessionSecretsFile string `json:"sessionSecretsFile"`
+ // sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession
+ SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"`
+ // sessionName is the cookie name used to store the session
+ SessionName string `json:"sessionName"`
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+ // authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens
+ AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds,omitempty"`
+ // accessTokenMaxAgeSeconds defines the maximum age of access tokens
+ AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+ // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+ // +optional
+ AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+ // accessTokenInactivityTimeout defines the token inactivity timeout
+ // for tokens granted by any client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out. Takes valid time
+ // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+ // value for duration is 300s (5 minutes). If the timeout is configured
+ // per client, then that value takes precedence. If the timeout value is
+ // not specified and the client does not override the value, then tokens
+ // are valid until their lifetime.
+ // +optional
+ AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type SessionSecrets struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Secrets is a list of secrets
+ // New sessions are signed and encrypted using the first secret.
+ // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.
+ Secrets []SessionSecret `json:"secrets"`
+}
+
+// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions
+type SessionSecret struct {
+ // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.
+ Authentication string `json:"authentication"`
+ // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-
+ Encryption string `json:"encryption"`
+}
diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..cb90b8365d
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go
@@ -0,0 +1,645 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider.
+func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(AllowAllPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.RemoteConnectionInfo = in.RemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicAuthPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider.
+func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(DenyAllPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.Organizations != nil {
+ in, out := &in.Organizations, &out.Organizations
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Teams != nil {
+ in, out := &in.Teams, &out.Teams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider.
+func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitHubIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.Legacy != nil {
+ in, out := &in.Legacy, &out.Legacy
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider.
+func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GitLabIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider.
+func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(GoogleIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GrantConfig) DeepCopyInto(out *GrantConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig.
+func (in *GrantConfig) DeepCopy() *GrantConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(GrantConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(HTPasswdPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) {
+ *out = *in
+ in.Provider.DeepCopyInto(&out.Provider)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider.
+func (in *IdentityProvider) DeepCopy() *IdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.RemoteConnectionInfo = in.RemoteConnectionInfo
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider.
+func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(KeystonePasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping.
+func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPAttributeMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.BindPassword = in.BindPassword
+ in.Attributes.DeepCopyInto(&out.Attributes)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider.
+func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(LDAPPasswordIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) {
+ *out = *in
+ if in.MasterCA != nil {
+ in, out := &in.MasterCA, &out.MasterCA
+ *out = new(string)
+ **out = **in
+ }
+ if in.IdentityProviders != nil {
+ in, out := &in.IdentityProviders, &out.IdentityProviders
+ *out = make([]IdentityProvider, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.GrantConfig = in.GrantConfig
+ if in.SessionConfig != nil {
+ in, out := &in.SessionConfig, &out.SessionConfig
+ *out = new(SessionConfig)
+ **out = **in
+ }
+ in.TokenConfig.DeepCopyInto(&out.TokenConfig)
+ if in.Templates != nil {
+ in, out := &in.Templates, &out.Templates
+ *out = new(OAuthTemplates)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig.
+func (in *OAuthConfig) DeepCopy() *OAuthConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates.
+func (in *OAuthTemplates) DeepCopy() *OAuthTemplates {
+ if in == nil {
+ return nil
+ }
+ out := new(OAuthTemplates)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) {
+ *out = *in
+ if in.ID != nil {
+ in, out := &in.ID, &out.ID
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsername != nil {
+ in, out := &in.PreferredUsername, &out.PreferredUsername
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Email != nil {
+ in, out := &in.Email, &out.Email
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims.
+func (in *OpenIDClaims) DeepCopy() *OpenIDClaims {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDClaims)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ClientSecret = in.ClientSecret
+ if in.ExtraScopes != nil {
+ in, out := &in.ExtraScopes, &out.ExtraScopes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExtraAuthorizeParameters != nil {
+ in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.URLs = in.URLs
+ in.Claims.DeepCopyInto(&out.Claims)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider.
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs.
+func (in *OpenIDURLs) DeepCopy() *OpenIDURLs {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenIDURLs)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OsinServerConfig) DeepCopyInto(out *OsinServerConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig)
+ in.OAuthConfig.DeepCopyInto(&out.OAuthConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsinServerConfig.
+func (in *OsinServerConfig) DeepCopy() *OsinServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OsinServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OsinServerConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.ClientCommonNames != nil {
+ in, out := &in.ClientCommonNames, &out.ClientCommonNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.PreferredUsernameHeaders != nil {
+ in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.NameHeaders != nil {
+ in, out := &in.NameHeaders, &out.NameHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EmailHeaders != nil {
+ in, out := &in.EmailHeaders, &out.EmailHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider.
+func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider {
+ if in == nil {
+ return nil
+ }
+ out := new(RequestHeaderIdentityProvider)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig.
+func (in *SessionConfig) DeepCopy() *SessionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionSecret) DeepCopyInto(out *SessionSecret) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret.
+func (in *SessionSecret) DeepCopy() *SessionSecret {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionSecret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Secrets != nil {
+ in, out := &in.Secrets, &out.Secrets
+ *out = make([]SessionSecret, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets.
+func (in *SessionSecrets) DeepCopy() *SessionSecrets {
+ if in == nil {
+ return nil
+ }
+ out := new(SessionSecrets)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SessionSecrets) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) {
+ *out = *in
+ if in.AccessTokenInactivityTimeoutSeconds != nil {
+ in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.AccessTokenInactivityTimeout != nil {
+ in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig.
+func (in *TokenConfig) DeepCopy() *TokenConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TokenConfig)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..0bffa8265b
--- /dev/null
+++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,280 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AllowAllPasswordIdentityProvider = map[string]string{
+ "": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_AllowAllPasswordIdentityProvider
+}
+
+var map_BasicAuthPasswordIdentityProvider = map[string]string{
+ "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_BasicAuthPasswordIdentityProvider
+}
+
+var map_DenyAllPasswordIdentityProvider = map[string]string{
+ "": "DenyAllPasswordIdentityProvider provides no identities for users\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+}
+
+func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_DenyAllPasswordIdentityProvider
+}
+
+var map_GitHubIdentityProvider = map[string]string{
+ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is the oauth client secret",
+ "organizations": "organizations optionally restricts which organizations are allowed to log in",
+ "teams": "teams optionally restricts which teams are allowed to log in. Format is /.",
+ "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.",
+ "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.",
+}
+
+func (GitHubIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitHubIdentityProvider
+}
+
+var map_GitLabIdentityProvider = map[string]string{
+ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
+ "url": "url is the oauth server base URL",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is the oauth client secret",
+ "legacy": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used",
+}
+
+func (GitLabIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GitLabIdentityProvider
+}
+
+var map_GoogleIdentityProvider = map[string]string{
+ "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is the oauth client secret",
+ "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to",
+}
+
+func (GoogleIdentityProvider) SwaggerDoc() map[string]string {
+ return map_GoogleIdentityProvider
+}
+
+var map_GrantConfig = map[string]string{
+ "": "GrantConfig holds the necessary configuration options for grant handlers",
+ "method": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients",
+ "serviceAccountMethod": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt",
+}
+
+func (GrantConfig) SwaggerDoc() map[string]string {
+ return map_GrantConfig
+}
+
+var map_HTPasswdPasswordIdentityProvider = map[string]string{
+ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "file": "file is a reference to your htpasswd file",
+}
+
+func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_HTPasswdPasswordIdentityProvider
+}
+
+var map_IdentityProvider = map[string]string{
+ "": "IdentityProvider provides identities for users authenticating using credentials",
+ "name": "name is used to qualify the identities returned by this provider",
+ "challenge": "challenge indicates whether to issue WWW-Authenticate challenges for this provider",
+ "login": "login indicates whether to use this identity provider for unauthenticated browsers to login against",
+ "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users",
+ "provider": "provider contains the information about how to set up a specific identity provider",
+}
+
+func (IdentityProvider) SwaggerDoc() map[string]string {
+ return map_IdentityProvider
+}
+
+var map_KeystonePasswordIdentityProvider = map[string]string{
+ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "domainName": "domainName is required for keystone v3",
+ "useKeystoneIdentity": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username",
+}
+
+func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_KeystonePasswordIdentityProvider
+}
+
+var map_LDAPAttributeMapping = map[string]string{
+ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields",
+ "id": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"",
+ "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"",
+ "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"",
+ "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+}
+
+func (LDAPAttributeMapping) SwaggerDoc() map[string]string {
+ return map_LDAPAttributeMapping
+}
+
+var map_LDAPPasswordIdentityProvider = map[string]string{
+ "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter",
+ "bindDN": "bindDN is an optional DN to bind with during the search phase.",
+ "bindPassword": "bindPassword is an optional password to bind with during the search phase.",
+ "insecure": "insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830",
+ "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
+ "attributes": "attributes maps LDAP attributes to identities",
+}
+
+func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string {
+ return map_LDAPPasswordIdentityProvider
+}
+
+var map_OAuthConfig = map[string]string{
+ "": "OAuthConfig holds the necessary configuration options for OAuth authentication",
+ "masterCA": "masterCA is the CA for verifying the TLS connection back to the MasterURL. This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+ "masterURL": "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+ "masterPublicURL": "masterPublicURL is used for building valid client redirect URLs for internal and external access This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated",
+ "loginURL": "loginURL, along with masterCA, masterURL and masterPublicURL have distinct meanings depending on how the OAuth server is run. The two states are: 1. embedded in the kube api server (all 3.x releases) 2. as a standalone external process (all 4.x releases) in the embedded configuration, loginURL is equivalent to masterPublicURL and the other fields have functionality that matches their docs. in the standalone configuration, the fields are used as: loginURL is the URL required to login to the cluster: oc login --server= masterPublicURL is the issuer URL it is accessible from inside (service network) and outside (ingress) of the cluster masterURL is the loopback variation of the token_endpoint URL with no path component it is only accessible from inside (service network) of the cluster masterCA is used to perform TLS verification for connections made to masterURL For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2",
+ "assetPublicURL": "assetPublicURL is used for building valid client redirect URLs for external access",
+ "alwaysShowProviderSelection": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.",
+ "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves",
+ "grantConfig": "grantConfig describes how to handle grants",
+ "sessionConfig": "sessionConfig hold information about configuring sessions.",
+ "tokenConfig": "tokenConfig contains options for authorization and access tokens",
+ "templates": "templates allow you to customize pages like the login page.",
+}
+
+func (OAuthConfig) SwaggerDoc() map[string]string {
+ return map_OAuthConfig
+}
+
+var map_OAuthTemplates = map[string]string{
+ "": "OAuthTemplates allow for customization of pages like the login page",
+ "login": "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.",
+ "providerSelection": "providerSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.",
+ "error": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.",
+}
+
+func (OAuthTemplates) SwaggerDoc() map[string]string {
+ return map_OAuthTemplates
+}
+
+var map_OpenIDClaims = map[string]string{
+ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider",
+ "id": "id is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"",
+ "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim",
+ "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity",
+ "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity",
+ "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user",
+}
+
+func (OpenIDClaims) SwaggerDoc() map[string]string {
+ return map_OpenIDClaims
+}
+
+var map_OpenIDIdentityProvider = map[string]string{
+ "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used",
+ "clientID": "clientID is the oauth client ID",
+ "clientSecret": "clientSecret is the oauth client secret",
+ "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.",
+ "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.",
+ "urls": "urls to use to authenticate",
+ "claims": "claims mappings",
+}
+
+func (OpenIDIdentityProvider) SwaggerDoc() map[string]string {
+ return map_OpenIDIdentityProvider
+}
+
+var map_OpenIDURLs = map[string]string{
+ "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider",
+ "authorize": "authorize is the oauth authorization URL",
+ "token": "token is the oauth token granting URL",
+ "userInfo": "userInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims",
+}
+
+func (OpenIDURLs) SwaggerDoc() map[string]string {
+ return map_OpenIDURLs
+}
+
+var map_OsinServerConfig = map[string]string{
+ "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "oauthConfig": "oauthConfig holds the necessary configuration options for OAuth authentication",
+}
+
+func (OsinServerConfig) SwaggerDoc() map[string]string {
+ return map_OsinServerConfig
+}
+
+var map_RequestHeaderIdentityProvider = map[string]string{
+ "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}",
+ "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}",
+ "clientCA": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.",
+ "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.",
+ "headers": "headers is the set of headers to check for identity information",
+ "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username",
+ "nameHeaders": "nameHeaders is the set of headers to check for the display name",
+ "emailHeaders": "emailHeaders is the set of headers to check for the email address",
+}
+
+func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string {
+ return map_RequestHeaderIdentityProvider
+}
+
+var map_SessionConfig = map[string]string{
+ "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession",
+ "sessionSecretsFile": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start",
+ "sessionMaxAgeSeconds": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession",
+ "sessionName": "sessionName is the cookie name used to store the session",
+}
+
+func (SessionConfig) SwaggerDoc() map[string]string {
+ return map_SessionConfig
+}
+
+var map_SessionSecret = map[string]string{
+ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions",
+ "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.",
+ "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-",
+}
+
+func (SessionSecret) SwaggerDoc() map[string]string {
+ return map_SessionSecret
+}
+
+var map_SessionSecrets = map[string]string{
+ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.",
+}
+
+func (SessionSecrets) SwaggerDoc() map[string]string {
+ return map_SessionSecrets
+}
+
+var map_TokenConfig = map[string]string{
+ "": "TokenConfig holds the necessary configuration options for authorization and access tokens",
+ "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens",
+ "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens",
+ "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.",
+ "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.",
+}
+
+func (TokenConfig) SwaggerDoc() map[string]string {
+ return map_TokenConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/pkg/serialization/serialization.go b/vendor/github.com/openshift/api/pkg/serialization/serialization.go
new file mode 100644
index 0000000000..70c8e7a994
--- /dev/null
+++ b/vendor/github.com/openshift/api/pkg/serialization/serialization.go
@@ -0,0 +1,45 @@
+package serialization
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DecodeNestedRawExtensionOrUnknown
+func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) {
+ if ext.Raw == nil || ext.Object != nil {
+ return
+ }
+ obj, gvk, err := d.Decode(ext.Raw, nil, nil)
+ if err != nil {
+ unk := &runtime.Unknown{Raw: ext.Raw}
+ if runtime.IsNotRegisteredError(err) {
+ if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil {
+ unk.APIVersion = gvk.GroupVersion().String()
+ unk.Kind = gvk.Kind
+ ext.Object = unk
+ return
+ }
+ }
+ // TODO: record mime-type with the object
+ if gvk != nil {
+ unk.APIVersion = gvk.GroupVersion().String()
+ unk.Kind = gvk.Kind
+ }
+ obj = unk
+ }
+ ext.Object = obj
+}
+
+// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or
+// return an error.
+func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error {
+ if ext.Raw != nil || ext.Object == nil {
+ return nil
+ }
+ data, err := runtime.Encode(e, ext.Object)
+ if err != nil {
+ return err
+ }
+ ext.Raw = data
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/project/OWNERS b/vendor/github.com/openshift/api/project/OWNERS
new file mode 100644
index 0000000000..9b1548f568
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/OWNERS
@@ -0,0 +1,2 @@
+reviewers:
+ - mfojtik
diff --git a/vendor/github.com/openshift/api/project/install.go b/vendor/github.com/openshift/api/project/install.go
new file mode 100644
index 0000000000..c96c7aa265
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/install.go
@@ -0,0 +1,26 @@
+package project
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ projectv1 "github.com/openshift/api/project/v1"
+)
+
+const (
+ GroupName = "project.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(projectv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/project/v1/doc.go b/vendor/github.com/openshift/api/project/v1/doc.go
new file mode 100644
index 0000000000..5bbd9d5ea7
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=project.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go
new file mode 100644
index 0000000000..822dbbc301
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go
@@ -0,0 +1,1305 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/project/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Project) Reset() { *m = Project{} }
+func (*Project) ProtoMessage() {}
+func (*Project) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fbf46eaac05029bf, []int{0}
+}
+func (m *Project) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Project) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Project.Merge(m, src)
+}
+func (m *Project) XXX_Size() int {
+ return m.Size()
+}
+func (m *Project) XXX_DiscardUnknown() {
+ xxx_messageInfo_Project.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Project proto.InternalMessageInfo
+
+func (m *ProjectList) Reset() { *m = ProjectList{} }
+func (*ProjectList) ProtoMessage() {}
+func (*ProjectList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fbf46eaac05029bf, []int{1}
+}
+func (m *ProjectList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectList.Merge(m, src)
+}
+func (m *ProjectList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectList proto.InternalMessageInfo
+
+func (m *ProjectRequest) Reset() { *m = ProjectRequest{} }
+func (*ProjectRequest) ProtoMessage() {}
+func (*ProjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fbf46eaac05029bf, []int{2}
+}
+func (m *ProjectRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectRequest.Merge(m, src)
+}
+func (m *ProjectRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo
+
+func (m *ProjectSpec) Reset() { *m = ProjectSpec{} }
+func (*ProjectSpec) ProtoMessage() {}
+func (*ProjectSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fbf46eaac05029bf, []int{3}
+}
+func (m *ProjectSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectSpec.Merge(m, src)
+}
+func (m *ProjectSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo
+
+func (m *ProjectStatus) Reset() { *m = ProjectStatus{} }
+func (*ProjectStatus) ProtoMessage() {}
+func (*ProjectStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fbf46eaac05029bf, []int{4}
+}
+func (m *ProjectStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ProjectStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProjectStatus.Merge(m, src)
+}
+func (m *ProjectStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProjectStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProjectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project")
+ proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList")
+ proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest")
+ proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec")
+ proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf)
+}
+
+var fileDescriptor_fbf46eaac05029bf = []byte{
+ // 573 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xd3, 0x30,
+ 0x18, 0xc7, 0x9b, 0x6d, 0x1d, 0xab, 0xcb, 0x26, 0x14, 0x2e, 0x55, 0x0f, 0x69, 0xc9, 0x24, 0xd4,
+ 0x03, 0x38, 0xb4, 0xbc, 0x88, 0x73, 0x40, 0x88, 0x49, 0xbc, 0x0c, 0x73, 0xab, 0x38, 0xe0, 0xa6,
+ 0x6e, 0x6a, 0xba, 0xc4, 0x26, 0x76, 0x2b, 0x8d, 0x13, 0x1f, 0x81, 0x3b, 0x9f, 0x83, 0x2b, 0xe7,
+ 0x1e, 0x77, 0xdc, 0xa9, 0x5a, 0xc3, 0xb7, 0xd8, 0x09, 0xd9, 0x71, 0x93, 0xc0, 0x8a, 0xd4, 0x5d,
+ 0xb8, 0xd5, 0x4f, 0xfe, 0xbf, 0x9f, 0xed, 0xe7, 0x49, 0x03, 0x1e, 0x86, 0x54, 0x8e, 0xa7, 0x03,
+ 0x18, 0xb0, 0xc8, 0x63, 0x9c, 0xc4, 0x62, 0x4c, 0x47, 0xd2, 0xc3, 0x9c, 0x7a, 0x3c, 0x61, 0x9f,
+ 0x48, 0x20, 0xbd, 0x59, 0xd7, 0x0b, 0x49, 0x4c, 0x12, 0x2c, 0xc9, 0x10, 0xf2, 0x84, 0x49, 0x66,
+ 0x1f, 0x16, 0x10, 0xcc, 0x21, 0x88, 0x39, 0x85, 0x06, 0x82, 0xb3, 0x6e, 0xf3, 0x7e, 0xc9, 0x1c,
+ 0xb2, 0x90, 0x79, 0x9a, 0x1d, 0x4c, 0x47, 0x7a, 0xa5, 0x17, 0xfa, 0x57, 0xe6, 0x6c, 0xba, 0x93,
+ 0xa7, 0x02, 0x52, 0xa6, 0xb7, 0x0e, 0x58, 0x42, 0xd6, 0xec, 0xdb, 0x7c, 0x54, 0x64, 0x22, 0x1c,
+ 0x8c, 0x69, 0x4c, 0x92, 0x53, 0x8f, 0x4f, 0x42, 0x55, 0x10, 0x5e, 0x44, 0x24, 0x5e, 0x47, 0x3d,
+ 0xf9, 0x17, 0x95, 0x4c, 0x63, 0x49, 0x23, 0xe2, 0x89, 0x60, 0x4c, 0x22, 0xfc, 0x37, 0xe7, 0x7e,
+ 0xdf, 0x02, 0x37, 0x8e, 0xb3, 0xfb, 0xd8, 0x1f, 0xc1, 0x9e, 0xd2, 0x0f, 0xb1, 0xc4, 0x0d, 0xab,
+ 0x6d, 0x75, 0xea, 0xbd, 0x07, 0x30, 0xd3, 0xc2, 0xb2, 0x16, 0xf2, 0x49, 0xa8, 0x0a, 0x02, 0xaa,
+ 0x34, 0x9c, 0x75, 0xe1, 0xdb, 0x81, 0xe2, 0x5f, 0x13, 0x89, 0x7d, 0x7b, 0xbe, 0x68, 0x55, 0xd2,
+ 0x45, 0x0b, 0x14, 0x35, 0x94, 0x5b, 0x6d, 0x04, 0x76, 0x04, 0x27, 0x41, 0x63, 0xcb, 0xd8, 0x37,
+ 0x68, 0x31, 0x34, 0xa7, 0x7b, 0xcf, 0x49, 0xe0, 0xdf, 0x34, 0xf6, 0x1d, 0xb5, 0x42, 0xda, 0x65,
+ 0xf7, 0xc1, 0xae, 0x90, 0x58, 0x4e, 0x45, 0x63, 0x5b, 0x5b, 0x7b, 0xd7, 0xb2, 0x6a, 0xd2, 0x3f,
+ 0x30, 0xde, 0xdd, 0x6c, 0x8d, 0x8c, 0xd1, 0xfd, 0x69, 0x81, 0xba, 0x49, 0xbe, 0xa2, 0x42, 0xda,
+ 0x1f, 0xae, 0x74, 0x08, 0x6e, 0xd6, 0x21, 0x45, 0xeb, 0xfe, 0xdc, 0x32, 0x3b, 0xed, 0xad, 0x2a,
+ 0xa5, 0xee, 0xbc, 0x03, 0x55, 0x2a, 0x49, 0x24, 0x1a, 0x5b, 0xed, 0xed, 0x4e, 0xbd, 0x77, 0xef,
+ 0x3a, 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0x7a, 0xa4, 0x14, 0x28, 0x33, 0xb9, 0x17, 0x16, 0x38, 0x30,
+ 0x09, 0x44, 0x3e, 0x4f, 0x89, 0xf8, 0x1f, 0x53, 0x7e, 0x0c, 0xea, 0x43, 0x2a, 0xf8, 0x09, 0x3e,
+ 0x7d, 0x83, 0x23, 0xa2, 0x87, 0x5d, 0xf3, 0x6f, 0x1b, 0xa4, 0xfe, 0xbc, 0x78, 0x84, 0xca, 0x39,
+ 0x8d, 0x11, 0x11, 0x24, 0x94, 0x4b, 0xca, 0x62, 0x3d, 0xcd, 0x32, 0x56, 0x3c, 0x42, 0xe5, 0x9c,
+ 0x8b, 0xf3, 0x11, 0xa9, 0x97, 0xc2, 0x46, 0x00, 0x8c, 0x68, 0x8c, 0x4f, 0xe8, 0x17, 0x92, 0x88,
+ 0x86, 0xd5, 0xde, 0xee, 0xd4, 0xfc, 0x9e, 0x3a, 0xea, 0x8b, 0xbc, 0x7a, 0xb9, 0x68, 0xb5, 0xaf,
+ 0xfe, 0x11, 0x61, 0x1e, 0xd0, 0x47, 0x2b, 0x59, 0xdc, 0x1f, 0x16, 0xd8, 0xff, 0xe3, 0x85, 0xb1,
+ 0x5f, 0x82, 0x2a, 0x1f, 0x63, 0x41, 0x74, 0x07, 0x6b, 0x7e, 0x6f, 0xd5, 0xfc, 0x63, 0x55, 0xbc,
+ 0x5c, 0xb4, 0xee, 0xac, 0xf1, 0x2b, 0xad, 0xe0, 0x38, 0x20, 0x3a, 0x84, 0x32, 0x81, 0xdd, 0x07,
+ 0x20, 0x60, 0xf1, 0x90, 0xaa, 0xbb, 0xac, 0x26, 0x7f, 0xb7, 0x34, 0x10, 0xa8, 0x70, 0x58, 0xc6,
+ 0x9f, 0xad, 0xe2, 0xc5, 0x18, 0xf2, 0x92, 0x40, 0x25, 0x9b, 0x7f, 0x34, 0x5f, 0x3a, 0x95, 0xb3,
+ 0xa5, 0x53, 0x39, 0x5f, 0x3a, 0x95, 0xaf, 0xa9, 0x63, 0xcd, 0x53, 0xc7, 0x3a, 0x4b, 0x1d, 0xeb,
+ 0x3c, 0x75, 0xac, 0x8b, 0xd4, 0xb1, 0xbe, 0xfd, 0x72, 0x2a, 0xfd, 0xc3, 0x0d, 0xbe, 0x8e, 0xbf,
+ 0x03, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x9b, 0x1f, 0xba, 0x43, 0x05, 0x00, 0x00,
+}
+
+func (m *Project) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Project) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Finalizers) > 0 {
+ for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Finalizers[iNdEx])
+ copy(dAtA[i:], m.Finalizers[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProjectStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.Phase)
+ copy(dAtA[i:], m.Phase)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Project) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ProjectList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DisplayName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ProjectSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Finalizers) > 0 {
+ for _, s := range m.Finalizers {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ProjectStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Phase)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Project) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Project{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Project{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ProjectList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectRequest{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ProjectSpec{`,
+ `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ProjectStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]NamespaceCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ProjectStatus{`,
+ `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Project) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Project: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Project{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProjectStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v11.NamespaceCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto
new file mode 100644
index 0000000000..c86bd80393
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/generated.proto
@@ -0,0 +1,90 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.project.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/project/v1";
+
+// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
+// a quota on the resources that the project may consume, and the security controls on the resources in
+// the project. Within a project, members may have different roles - project administrators can set
+// membership, editors can create and manage the resources, and viewers can see but not access running
+// containers. In a normal cluster project administrators are not able to alter their quotas - that is
+// restricted to cluster administrators.
+//
+// Listing or watching projects will return only projects the user has the reader role on.
+//
+// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed
+// as editable to end users while namespaces are not. Direct creation of a project is typically restricted
+// to administrators, while end users should use the requestproject resource.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Project {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the behavior of the Namespace.
+ optional ProjectSpec spec = 2;
+
+ // Status describes the current status of a Namespace
+ // +optional
+ optional ProjectStatus status = 3;
+}
+
+// ProjectList is a list of Project objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ProjectList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of projects
+ repeated Project items = 2;
+}
+
+// ProjectRequest is the set of options necessary to fully qualify a project request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ProjectRequest {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // DisplayName is the display name to apply to a project
+ optional string displayName = 2;
+
+ // Description is the description to apply to a project
+ optional string description = 3;
+}
+
+// ProjectSpec describes the attributes on a Project
+message ProjectSpec {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage
+ repeated string finalizers = 1;
+}
+
+// ProjectStatus is information about the current status of a Project
+message ProjectStatus {
+ // Phase is the current lifecycle phase of the project
+ // +optional
+ optional string phase = 1;
+
+ // Represents the latest available observations of the project current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go
new file mode 100644
index 0000000000..186f905f3a
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/legacy.go
@@ -0,0 +1,23 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &Project{},
+ &ProjectList{},
+ &ProjectRequest{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go
new file mode 100644
index 0000000000..e471716ce8
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/register.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "project.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Project{},
+ &ProjectList{},
+ &ProjectRequest{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go
new file mode 100644
index 0000000000..9c17a5deab
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/types.go
@@ -0,0 +1,111 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProjectList is a list of Project objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProjectList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of projects
+ Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+ // These are internal finalizer values to Origin
+ FinalizerOrigin corev1.FinalizerName = "openshift.io/origin"
+ // ProjectNodeSelector is an annotation that holds the node selector;
+ // the node selector annotation determines which nodes will have pods from this project scheduled to them
+ ProjectNodeSelector = "openshift.io/node-selector"
+
+ // ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present,
+ // but it is set by the default project template.
+ ProjectRequesterAnnotation = "openshift.io/requester"
+)
+
+// ProjectSpec describes the attributes on a Project
+type ProjectSpec struct {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage
+ Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"`
+}
+
+// ProjectStatus is information about the current status of a Project
+type ProjectStatus struct {
+ // Phase is the current lifecycle phase of the project
+ // +optional
+ Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"`
+
+ // Represents the latest available observations of the project current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
+// a quota on the resources that the project may consume, and the security controls on the resources in
+// the project. Within a project, members may have different roles - project administrators can set
+// membership, editors can create and manage the resources, and viewers can see but not access running
+// containers. In a normal cluster project administrators are not able to alter their quotas - that is
+// restricted to cluster administrators.
+//
+// Listing or watching projects will return only projects the user has the reader role on.
+//
+// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed
+// as editable to end users while namespaces are not. Direct creation of a project is typically restricted
+// to administrators, while end users should use the requestproject resource.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the behavior of the Namespace.
+ Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status describes the current status of a Namespace
+ // +optional
+ Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=Project
+
+// ProjectRequest is the set of options necessary to fully qualify a project request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProjectRequest struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // DisplayName is the display name to apply to a project
+ DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"`
+ // Description is the description to apply to a project
+ Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"`
+}
diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ddbdda971d
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go
@@ -0,0 +1,142 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Project) DeepCopyInto(out *Project) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project.
+func (in *Project) DeepCopy() *Project {
+ if in == nil {
+ return nil
+ }
+ out := new(Project)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Project) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectList) DeepCopyInto(out *ProjectList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Project, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList.
+func (in *ProjectList) DeepCopy() *ProjectList {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest.
+func (in *ProjectRequest) DeepCopy() *ProjectRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProjectRequest) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) {
+ *out = *in
+ if in.Finalizers != nil {
+ in, out := &in.Finalizers, &out.Finalizers
+ *out = make([]corev1.FinalizerName, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec.
+func (in *ProjectSpec) DeepCopy() *ProjectSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]corev1.NamespaceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus.
+func (in *ProjectStatus) DeepCopy() *ProjectStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ProjectStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..890e651d73
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,65 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Project = map[string]string{
+ "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the behavior of the Namespace.",
+ "status": "Status describes the current status of a Namespace",
+}
+
+func (Project) SwaggerDoc() map[string]string {
+ return map_Project
+}
+
+var map_ProjectList = map[string]string{
+ "": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of projects",
+}
+
+func (ProjectList) SwaggerDoc() map[string]string {
+ return map_ProjectList
+}
+
+var map_ProjectRequest = map[string]string{
+ "": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "displayName": "DisplayName is the display name to apply to a project",
+ "description": "Description is the description to apply to a project",
+}
+
+func (ProjectRequest) SwaggerDoc() map[string]string {
+ return map_ProjectRequest
+}
+
+var map_ProjectSpec = map[string]string{
+ "": "ProjectSpec describes the attributes on a Project",
+ "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage",
+}
+
+func (ProjectSpec) SwaggerDoc() map[string]string {
+ return map_ProjectSpec
+}
+
+var map_ProjectStatus = map[string]string{
+ "": "ProjectStatus is information about the current status of a Project",
+ "phase": "Phase is the current lifecycle phase of the project",
+ "conditions": "Represents the latest available observations of the project current state.",
+}
+
+func (ProjectStatus) SwaggerDoc() map[string]string {
+ return map_ProjectStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/quota/OWNERS b/vendor/github.com/openshift/api/quota/OWNERS
new file mode 100644
index 0000000000..75dbd7b566
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/OWNERS
@@ -0,0 +1,3 @@
+reviewers:
+ - deads2k
+ - mfojtik
diff --git a/vendor/github.com/openshift/api/quota/install.go b/vendor/github.com/openshift/api/quota/install.go
new file mode 100644
index 0000000000..2a88e7d0a4
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/install.go
@@ -0,0 +1,26 @@
+package quota
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ quotav1 "github.com/openshift/api/quota/v1"
+)
+
+const (
+ GroupName = "quota.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(quotav1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/quota/v1/Makefile b/vendor/github.com/openshift/api/quota/v1/Makefile
new file mode 100644
index 0000000000..691859dd82
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="quota.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/quota/v1/doc.go b/vendor/github.com/openshift/api/quota/v1/doc.go
new file mode 100644
index 0000000000..ae5c9c2c76
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/quota/apis/quota
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=quota.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/quota/v1/generated.pb.go b/vendor/github.com/openshift/api/quota/v1/generated.pb.go
new file mode 100644
index 0000000000..7556462cff
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/generated.pb.go
@@ -0,0 +1,2152 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/quota/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AppliedClusterResourceQuota) Reset() { *m = AppliedClusterResourceQuota{} }
+func (*AppliedClusterResourceQuota) ProtoMessage() {}
+func (*AppliedClusterResourceQuota) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{0}
+}
+func (m *AppliedClusterResourceQuota) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AppliedClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AppliedClusterResourceQuota) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AppliedClusterResourceQuota.Merge(m, src)
+}
+func (m *AppliedClusterResourceQuota) XXX_Size() int {
+ return m.Size()
+}
+func (m *AppliedClusterResourceQuota) XXX_DiscardUnknown() {
+ xxx_messageInfo_AppliedClusterResourceQuota.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppliedClusterResourceQuota proto.InternalMessageInfo
+
+func (m *AppliedClusterResourceQuotaList) Reset() { *m = AppliedClusterResourceQuotaList{} }
+func (*AppliedClusterResourceQuotaList) ProtoMessage() {}
+func (*AppliedClusterResourceQuotaList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{1}
+}
+func (m *AppliedClusterResourceQuotaList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AppliedClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AppliedClusterResourceQuotaList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AppliedClusterResourceQuotaList.Merge(m, src)
+}
+func (m *AppliedClusterResourceQuotaList) XXX_Size() int {
+ return m.Size()
+}
+func (m *AppliedClusterResourceQuotaList) XXX_DiscardUnknown() {
+ xxx_messageInfo_AppliedClusterResourceQuotaList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppliedClusterResourceQuotaList proto.InternalMessageInfo
+
+func (m *ClusterResourceQuota) Reset() { *m = ClusterResourceQuota{} }
+func (*ClusterResourceQuota) ProtoMessage() {}
+func (*ClusterResourceQuota) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{2}
+}
+func (m *ClusterResourceQuota) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterResourceQuota) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterResourceQuota.Merge(m, src)
+}
+func (m *ClusterResourceQuota) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterResourceQuota) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterResourceQuota.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterResourceQuota proto.InternalMessageInfo
+
+func (m *ClusterResourceQuotaList) Reset() { *m = ClusterResourceQuotaList{} }
+func (*ClusterResourceQuotaList) ProtoMessage() {}
+func (*ClusterResourceQuotaList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{3}
+}
+func (m *ClusterResourceQuotaList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterResourceQuotaList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterResourceQuotaList.Merge(m, src)
+}
+func (m *ClusterResourceQuotaList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterResourceQuotaList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterResourceQuotaList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterResourceQuotaList proto.InternalMessageInfo
+
+func (m *ClusterResourceQuotaSelector) Reset() { *m = ClusterResourceQuotaSelector{} }
+func (*ClusterResourceQuotaSelector) ProtoMessage() {}
+func (*ClusterResourceQuotaSelector) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{4}
+}
+func (m *ClusterResourceQuotaSelector) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterResourceQuotaSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterResourceQuotaSelector) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterResourceQuotaSelector.Merge(m, src)
+}
+func (m *ClusterResourceQuotaSelector) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterResourceQuotaSelector) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterResourceQuotaSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterResourceQuotaSelector proto.InternalMessageInfo
+
+func (m *ClusterResourceQuotaSpec) Reset() { *m = ClusterResourceQuotaSpec{} }
+func (*ClusterResourceQuotaSpec) ProtoMessage() {}
+func (*ClusterResourceQuotaSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{5}
+}
+func (m *ClusterResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterResourceQuotaSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterResourceQuotaSpec.Merge(m, src)
+}
+func (m *ClusterResourceQuotaSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterResourceQuotaSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterResourceQuotaSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterResourceQuotaSpec proto.InternalMessageInfo
+
+func (m *ClusterResourceQuotaStatus) Reset() { *m = ClusterResourceQuotaStatus{} }
+func (*ClusterResourceQuotaStatus) ProtoMessage() {}
+func (*ClusterResourceQuotaStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{6}
+}
+func (m *ClusterResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ClusterResourceQuotaStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterResourceQuotaStatus.Merge(m, src)
+}
+func (m *ClusterResourceQuotaStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterResourceQuotaStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterResourceQuotaStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterResourceQuotaStatus proto.InternalMessageInfo
+
+func (m *ResourceQuotaStatusByNamespace) Reset() { *m = ResourceQuotaStatusByNamespace{} }
+func (*ResourceQuotaStatusByNamespace) ProtoMessage() {}
+func (*ResourceQuotaStatusByNamespace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f605e5b8440aecb8, []int{7}
+}
+func (m *ResourceQuotaStatusByNamespace) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResourceQuotaStatusByNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ResourceQuotaStatusByNamespace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceQuotaStatusByNamespace.Merge(m, src)
+}
+func (m *ResourceQuotaStatusByNamespace) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResourceQuotaStatusByNamespace) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceQuotaStatusByNamespace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceQuotaStatusByNamespace proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*AppliedClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuota")
+ proto.RegisterType((*AppliedClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuotaList")
+ proto.RegisterType((*ClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuota")
+ proto.RegisterType((*ClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaList")
+ proto.RegisterType((*ClusterResourceQuotaSelector)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector.AnnotationsEntry")
+ proto.RegisterType((*ClusterResourceQuotaSpec)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSpec")
+ proto.RegisterType((*ClusterResourceQuotaStatus)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaStatus")
+ proto.RegisterType((*ResourceQuotaStatusByNamespace)(nil), "github.com.openshift.api.quota.v1.ResourceQuotaStatusByNamespace")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/quota/v1/generated.proto", fileDescriptor_f605e5b8440aecb8)
+}
+
+var fileDescriptor_f605e5b8440aecb8 = []byte{
+ // 716 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6f, 0xd3, 0x3e,
+ 0x1c, 0x6d, 0xba, 0x75, 0x5a, 0xbd, 0xff, 0xfe, 0xda, 0xac, 0x1d, 0xaa, 0x82, 0xd2, 0x2d, 0x12,
+ 0x62, 0x17, 0x1c, 0x3a, 0x10, 0x4c, 0x20, 0x86, 0x16, 0x84, 0x10, 0x68, 0x30, 0x08, 0x9c, 0xd0,
+ 0x40, 0xb8, 0x99, 0xd7, 0x86, 0x26, 0x71, 0x88, 0x9d, 0x4a, 0xbd, 0xf1, 0x09, 0x10, 0x9f, 0x81,
+ 0x0f, 0xc2, 0x0d, 0x69, 0x37, 0x76, 0x01, 0xed, 0x34, 0xd1, 0xc0, 0x07, 0x41, 0x76, 0xdc, 0xa4,
+ 0xdb, 0xda, 0xad, 0x6c, 0x07, 0x2e, 0xdc, 0xe2, 0x5f, 0xfd, 0xde, 0xfb, 0xfd, 0x5e, 0x9e, 0xdd,
+ 0x80, 0x7a, 0xd3, 0xe5, 0xad, 0xb8, 0x81, 0x1c, 0xea, 0x9b, 0x34, 0x24, 0x01, 0x6b, 0xb9, 0x3b,
+ 0xdc, 0xc4, 0xa1, 0x6b, 0xbe, 0x8b, 0x29, 0xc7, 0x66, 0xa7, 0x6e, 0x36, 0x49, 0x40, 0x22, 0xcc,
+ 0xc9, 0x36, 0x0a, 0x23, 0xca, 0x29, 0x5c, 0xca, 0x21, 0x28, 0x83, 0x20, 0x1c, 0xba, 0x48, 0x42,
+ 0x50, 0xa7, 0x5e, 0xbd, 0x32, 0xc0, 0xda, 0xa4, 0x4d, 0x6a, 0x4a, 0x64, 0x23, 0xde, 0x91, 0x2b,
+ 0xb9, 0x90, 0x4f, 0x29, 0x63, 0xd5, 0x68, 0xaf, 0x32, 0xe4, 0x52, 0x29, 0xeb, 0xd0, 0x88, 0x0c,
+ 0x51, 0xad, 0x5e, 0xcf, 0xf7, 0xf8, 0xd8, 0x69, 0xb9, 0x01, 0x89, 0xba, 0x66, 0xd8, 0x6e, 0x8a,
+ 0x02, 0x33, 0x7d, 0x32, 0xb4, 0xd7, 0xea, 0x8d, 0x51, 0xa8, 0x28, 0x0e, 0xb8, 0xeb, 0x13, 0x93,
+ 0x39, 0x2d, 0xe2, 0xe3, 0xa3, 0x38, 0xe3, 0x4b, 0x11, 0x5c, 0x58, 0x0f, 0x43, 0xcf, 0x25, 0xdb,
+ 0xf7, 0xbc, 0x98, 0x71, 0x12, 0xd9, 0x84, 0xd1, 0x38, 0x72, 0xc8, 0x33, 0x31, 0x23, 0x7c, 0x03,
+ 0xa6, 0x85, 0xe4, 0x36, 0xe6, 0xb8, 0xa2, 0x2d, 0x6a, 0xcb, 0x33, 0x2b, 0x57, 0x51, 0x2a, 0x85,
+ 0x06, 0xa5, 0x50, 0xd8, 0x6e, 0x8a, 0x02, 0x43, 0x62, 0x37, 0xea, 0xd4, 0xd1, 0x66, 0xe3, 0x2d,
+ 0x71, 0xf8, 0x63, 0xc2, 0xb1, 0x05, 0x77, 0x0f, 0x6a, 0x85, 0xe4, 0xa0, 0x06, 0xf2, 0x9a, 0x9d,
+ 0xb1, 0xc2, 0x57, 0x60, 0x92, 0x85, 0xc4, 0xa9, 0x14, 0x25, 0xfb, 0x6d, 0x74, 0xaa, 0xe9, 0x68,
+ 0x58, 0xa3, 0xcf, 0x43, 0xe2, 0x58, 0xff, 0x29, 0xa1, 0x49, 0xb1, 0xb2, 0x25, 0x2d, 0x24, 0x60,
+ 0x8a, 0x71, 0xcc, 0x63, 0x56, 0x99, 0x90, 0x02, 0x77, 0xce, 0x2a, 0x20, 0x49, 0xac, 0xff, 0x95,
+ 0xc4, 0x54, 0xba, 0xb6, 0x15, 0xb9, 0xf1, 0x4b, 0x03, 0xb5, 0x13, 0x7c, 0xdc, 0x70, 0x19, 0x87,
+ 0x5b, 0xc7, 0xbc, 0x44, 0xe3, 0x79, 0x29, 0xd0, 0xd2, 0xc9, 0x39, 0xa5, 0x3e, 0xdd, 0xaf, 0x0c,
+ 0xf8, 0xe8, 0x80, 0x92, 0xcb, 0x89, 0xcf, 0x2a, 0xc5, 0xc5, 0x89, 0xe5, 0x99, 0x95, 0xb5, 0x31,
+ 0xe6, 0x3c, 0xa1, 0x61, 0x6b, 0x56, 0x49, 0x95, 0x1e, 0x0a, 0x52, 0x3b, 0xe5, 0x36, 0x3e, 0x17,
+ 0xc1, 0xc2, 0xbf, 0x9c, 0x9c, 0x23, 0x27, 0xdf, 0x35, 0x50, 0xf9, 0x4b, 0x01, 0xd9, 0x3a, 0x1c,
+ 0x90, 0x9b, 0x67, 0x1c, 0x70, 0x44, 0x32, 0xbe, 0x16, 0xc1, 0xc5, 0xa1, 0x7e, 0x10, 0x8f, 0x38,
+ 0x9c, 0x46, 0xf0, 0x35, 0x98, 0xf2, 0x70, 0x83, 0x78, 0x4c, 0x8d, 0x76, 0x6d, 0xcc, 0xd1, 0x04,
+ 0xa6, 0x4f, 0x62, 0xcd, 0x27, 0x07, 0xb5, 0xd9, 0x43, 0x25, 0x5b, 0xb1, 0xc2, 0x0f, 0x1a, 0x98,
+ 0xc1, 0x41, 0x40, 0x39, 0xe6, 0x2e, 0x0d, 0xfa, 0x53, 0x3e, 0x3d, 0xeb, 0x6b, 0x54, 0xf4, 0x68,
+ 0x3d, 0xa7, 0xbc, 0x1f, 0xf0, 0xa8, 0x6b, 0x55, 0xd5, 0xf8, 0x30, 0xff, 0x25, 0xeb, 0x65, 0xb0,
+ 0x81, 0xea, 0x1a, 0x98, 0x3b, 0x0a, 0x86, 0x73, 0x60, 0xa2, 0x4d, 0xba, 0xd2, 0x81, 0xb2, 0x2d,
+ 0x1e, 0xe1, 0x02, 0x28, 0x75, 0xb0, 0x17, 0x13, 0x99, 0xeb, 0xb2, 0x9d, 0x2e, 0x6e, 0x15, 0x57,
+ 0x35, 0xe3, 0xdb, 0x88, 0xa8, 0x88, 0xd0, 0x42, 0x1f, 0x4c, 0x33, 0xa5, 0xaa, 0xfc, 0xbc, 0x7b,
+ 0xce, 0x49, 0xf3, 0xec, 0x64, 0xe3, 0x64, 0x12, 0xf0, 0x11, 0x28, 0x49, 0x12, 0x75, 0xfa, 0x2e,
+ 0x0d, 0xbc, 0x3b, 0x24, 0xfe, 0xc8, 0x04, 0xf9, 0xf1, 0x73, 0x96, 0x25, 0x45, 0x96, 0xec, 0x94,
+ 0xc2, 0xe8, 0x69, 0xa0, 0x3a, 0xfa, 0xe4, 0xc0, 0x0d, 0x50, 0xe2, 0x94, 0x63, 0x4f, 0x8d, 0x75,
+ 0xf9, 0x74, 0xa9, 0xf4, 0xc4, 0x65, 0x62, 0x2f, 0x04, 0xda, 0x4e, 0x49, 0x60, 0x0c, 0x40, 0x80,
+ 0x7d, 0xc2, 0x42, 0xec, 0x90, 0x7e, 0x26, 0xd6, 0xc7, 0x70, 0x6a, 0x98, 0x42, 0xf7, 0x49, 0x9f,
+ 0x29, 0xbf, 0xaa, 0xb2, 0x12, 0xb3, 0x07, 0x84, 0x8c, 0x4f, 0x1a, 0xd0, 0x4f, 0xa6, 0x80, 0x26,
+ 0x28, 0x67, 0x80, 0x34, 0x10, 0xd6, 0xbc, 0x62, 0x2d, 0x67, 0xbb, 0xec, 0x7c, 0x0f, 0xdc, 0xcc,
+ 0x6e, 0xa8, 0xe2, 0x9f, 0x39, 0x33, 0xe2, 0x2e, 0xb2, 0x1e, 0xec, 0xf6, 0xf4, 0xc2, 0x5e, 0x4f,
+ 0x2f, 0xec, 0xf7, 0xf4, 0xc2, 0xfb, 0x44, 0xd7, 0x76, 0x13, 0x5d, 0xdb, 0x4b, 0x74, 0x6d, 0x3f,
+ 0xd1, 0xb5, 0x1f, 0x89, 0xae, 0x7d, 0xfc, 0xa9, 0x17, 0x5e, 0x2e, 0x9d, 0xfa, 0xe1, 0xf4, 0x3b,
+ 0x00, 0x00, 0xff, 0xff, 0xda, 0x49, 0x50, 0x7b, 0x5c, 0x09, 0x00, 0x00,
+}
+
+func (m *AppliedClusterResourceQuota) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AppliedClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AppliedClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *AppliedClusterResourceQuotaList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AppliedClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AppliedClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterResourceQuota) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterResourceQuotaList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterResourceQuotaSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterResourceQuotaSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterResourceQuotaSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.AnnotationSelector) > 0 {
+ keysForAnnotationSelector := make([]string, 0, len(m.AnnotationSelector))
+ for k := range m.AnnotationSelector {
+ keysForAnnotationSelector = append(keysForAnnotationSelector, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector)
+ for iNdEx := len(keysForAnnotationSelector) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.AnnotationSelector[string(keysForAnnotationSelector[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAnnotationSelector[iNdEx])
+ copy(dAtA[i:], keysForAnnotationSelector[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotationSelector[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.LabelSelector != nil {
+ {
+ size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterResourceQuotaSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterResourceQuotaStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Namespaces) > 0 {
+ for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.Total.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceQuotaStatusByNamespace) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceQuotaStatusByNamespace) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceQuotaStatusByNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AppliedClusterResourceQuota) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *AppliedClusterResourceQuotaList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterResourceQuota) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ClusterResourceQuotaList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterResourceQuotaSelector) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LabelSelector != nil {
+ l = m.LabelSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.AnnotationSelector) > 0 {
+ for k, v := range m.AnnotationSelector {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ClusterResourceQuotaSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Quota.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ClusterResourceQuotaStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Total.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Namespaces) > 0 {
+ for _, e := range m.Namespaces {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ResourceQuotaStatusByNamespace) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AppliedClusterResourceQuota) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AppliedClusterResourceQuota{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AppliedClusterResourceQuotaList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]AppliedClusterResourceQuota{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppliedClusterResourceQuota", "AppliedClusterResourceQuota", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&AppliedClusterResourceQuotaList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterResourceQuota) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterResourceQuota{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterResourceQuotaList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterResourceQuota{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterResourceQuota", "ClusterResourceQuota", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterResourceQuotaList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterResourceQuotaSelector) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAnnotationSelector := make([]string, 0, len(this.AnnotationSelector))
+ for k := range this.AnnotationSelector {
+ keysForAnnotationSelector = append(keysForAnnotationSelector, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector)
+ mapStringForAnnotationSelector := "map[string]string{"
+ for _, k := range keysForAnnotationSelector {
+ mapStringForAnnotationSelector += fmt.Sprintf("%v: %v,", k, this.AnnotationSelector[k])
+ }
+ mapStringForAnnotationSelector += "}"
+ s := strings.Join([]string{`&ClusterResourceQuotaSelector{`,
+ `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `AnnotationSelector:` + mapStringForAnnotationSelector + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterResourceQuotaSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterResourceQuotaSpec{`,
+ `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "ClusterResourceQuotaSelector", "ClusterResourceQuotaSelector", 1), `&`, ``, 1) + `,`,
+ `Quota:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Quota), "ResourceQuotaSpec", "v11.ResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterResourceQuotaStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForNamespaces := "[]ResourceQuotaStatusByNamespace{"
+ for _, f := range this.Namespaces {
+ repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "ResourceQuotaStatusByNamespace", "ResourceQuotaStatusByNamespace", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForNamespaces += "}"
+ s := strings.Join([]string{`&ClusterResourceQuotaStatus{`,
+ `Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
+ `Namespaces:` + repeatedStringForNamespaces + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ResourceQuotaStatusByNamespace) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ResourceQuotaStatusByNamespace{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *AppliedClusterResourceQuota) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AppliedClusterResourceQuota: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AppliedClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AppliedClusterResourceQuotaList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AppliedClusterResourceQuotaList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AppliedClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, AppliedClusterResourceQuota{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterResourceQuota) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterResourceQuota: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterResourceQuotaList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterResourceQuotaList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ClusterResourceQuota{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterResourceQuotaSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterResourceQuotaSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterResourceQuotaSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LabelSelector == nil {
+ m.LabelSelector = &v1.LabelSelector{}
+ }
+ if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AnnotationSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AnnotationSelector == nil {
+ m.AnnotationSelector = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.AnnotationSelector[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterResourceQuotaSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterResourceQuotaSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterResourceQuotaStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterResourceQuotaStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Total.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespaces = append(m.Namespaces, ResourceQuotaStatusByNamespace{})
+ if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResourceQuotaStatusByNamespace) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto
new file mode 100644
index 0000000000..a72b97c884
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/generated.proto
@@ -0,0 +1,129 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.quota.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/quota/v1";
+
+// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection
+// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to
+// his project and their associated usage.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message AppliedClusterResourceQuota {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired quota
+ optional ClusterResourceQuotaSpec spec = 2;
+
+ // Status defines the actual enforced quota and its current usage
+ optional ClusterResourceQuotaStatus status = 3;
+}
+
+// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message AppliedClusterResourceQuotaList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of AppliedClusterResourceQuota
+ repeated AppliedClusterResourceQuota items = 2;
+}
+
+// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to
+// synthetic ResourceQuota object to allow quota evaluation re-use.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01
+// +openshift:compatibility-gen:level=1
+message ClusterResourceQuota {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired quota
+ optional ClusterResourceQuotaSpec spec = 2;
+
+ // Status defines the actual enforced quota and its current usage
+ optional ClusterResourceQuotaStatus status = 3;
+}
+
+// ClusterResourceQuotaList is a collection of ClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterResourceQuotaList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of ClusterResourceQuotas
+ repeated ClusterResourceQuota items = 2;
+}
+
+// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector
+// must present. If only one is present, it is the only selection criteria. If both are specified,
+// the project must match both restrictions.
+message ClusterResourceQuotaSelector {
+ // LabelSelector is used to select projects by label.
+ // +optional
+ // +nullable
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 1;
+
+ // AnnotationSelector is used to select projects by annotation.
+ // +optional
+ // +nullable
+ map<string, string> annotations = 2;
+}
+
+// ClusterResourceQuotaSpec defines the desired quota restrictions
+message ClusterResourceQuotaSpec {
+ // Selector is the selector used to match projects.
+ // It should only select active projects on the scale of dozens (though it can select
+ // many more less active projects). These projects will contend on object creation through
+ // this resource.
+ optional ClusterResourceQuotaSelector selector = 1;
+
+ // Quota defines the desired quota
+ optional k8s.io.api.core.v1.ResourceQuotaSpec quota = 2;
+}
+
+// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage
+message ClusterResourceQuotaStatus {
+ // Total defines the actual enforced quota and its current usage across all projects
+ optional k8s.io.api.core.v1.ResourceQuotaStatus total = 1;
+
+ // Namespaces slices the usage by project. This division allows for quick resolution of
+ // deletion reconciliation inside of a single project without requiring a recalculation
+ // across all projects. This can be used to pull the deltas for a given project.
+ // +optional
+ // +nullable
+ repeated ResourceQuotaStatusByNamespace namespaces = 2;
+}
+
+// ResourceQuotaStatusByNamespace gives status for a particular project
+message ResourceQuotaStatusByNamespace {
+ // Namespace the project this status applies to
+ optional string namespace = 1;
+
+ // Status indicates how many resources have been consumed by this project
+ optional k8s.io.api.core.v1.ResourceQuotaStatus status = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/quota/v1/legacy.go b/vendor/github.com/openshift/api/quota/v1/legacy.go
new file mode 100644
index 0000000000..402690b5d6
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/legacy.go
@@ -0,0 +1,24 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &ClusterResourceQuota{},
+ &ClusterResourceQuotaList{},
+ &AppliedClusterResourceQuota{},
+ &AppliedClusterResourceQuotaList{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/quota/v1/register.go b/vendor/github.com/openshift/api/quota/v1/register.go
new file mode 100644
index 0000000000..47c774ef23
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/register.go
@@ -0,0 +1,41 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "quota.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &ClusterResourceQuota{},
+ &ClusterResourceQuotaList{},
+ &AppliedClusterResourceQuota{},
+ &AppliedClusterResourceQuotaList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go
new file mode 100644
index 0000000000..de918711be
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/types.go
@@ -0,0 +1,144 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to
+// synthetic ResourceQuota object to allow quota evaluation re-use.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470
+// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01
+// +openshift:compatibility-gen:level=1
+type ClusterResourceQuota struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired quota
+ Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status defines the actual enforced quota and its current usage
+ Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ClusterResourceQuotaSpec defines the desired quota restrictions
+type ClusterResourceQuotaSpec struct {
+ // Selector is the selector used to match projects.
+ // It should only select active projects on the scale of dozens (though it can select
+ // many more less active projects). These projects will contend on object creation through
+ // this resource.
+ Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+ // Quota defines the desired quota
+ Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"`
+}
+
+// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector
+// must present. If only one is present, it is the only selection criteria. If both are specified,
+// the project must match both restrictions.
+type ClusterResourceQuotaSelector struct {
+ // LabelSelector is used to select projects by label.
+ // +optional
+ // +nullable
+ LabelSelector *metav1.LabelSelector `json:"labels" protobuf:"bytes,1,opt,name=labels"`
+
+ // AnnotationSelector is used to select projects by annotation.
+ // +optional
+ // +nullable
+ AnnotationSelector map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"`
+}
+
+// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage
+type ClusterResourceQuotaStatus struct {
+ // Total defines the actual enforced quota and its current usage across all projects
+ Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"`
+
+ // Namespaces slices the usage by project. This division allows for quick resolution of
+ // deletion reconciliation inside of a single project without requiring a recalculation
+ // across all projects. This can be used to pull the deltas for a given project.
+ // +optional
+ // +nullable
+ Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterResourceQuotaList is a collection of ClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterResourceQuotaList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of ClusterResourceQuotas
+ Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace
+type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace
+
+// ResourceQuotaStatusByNamespace gives status for a particular project
+type ResourceQuotaStatusByNamespace struct {
+ // Namespace the project this status applies to
+ Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+
+ // Status indicates how many resources have been consumed by this project
+ Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection
+// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to
+// his project and their associated usage.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AppliedClusterResourceQuota struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired quota
+ Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status defines the actual enforced quota and its current usage
+ Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AppliedClusterResourceQuotaList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of AppliedClusterResourceQuota
+ Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..72ac882fbd
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go
@@ -0,0 +1,242 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppliedClusterResourceQuota) DeepCopyInto(out *AppliedClusterResourceQuota) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuota.
+func (in *AppliedClusterResourceQuota) DeepCopy() *AppliedClusterResourceQuota {
+ if in == nil {
+ return nil
+ }
+ out := new(AppliedClusterResourceQuota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AppliedClusterResourceQuota) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppliedClusterResourceQuotaList) DeepCopyInto(out *AppliedClusterResourceQuotaList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AppliedClusterResourceQuota, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuotaList.
+func (in *AppliedClusterResourceQuotaList) DeepCopy() *AppliedClusterResourceQuotaList {
+ if in == nil {
+ return nil
+ }
+ out := new(AppliedClusterResourceQuotaList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AppliedClusterResourceQuotaList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceQuota) DeepCopyInto(out *ClusterResourceQuota) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuota.
+func (in *ClusterResourceQuota) DeepCopy() *ClusterResourceQuota {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterResourceQuota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterResourceQuota) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceQuotaList) DeepCopyInto(out *ClusterResourceQuotaList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClusterResourceQuota, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaList.
+func (in *ClusterResourceQuotaList) DeepCopy() *ClusterResourceQuotaList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterResourceQuotaList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterResourceQuotaList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceQuotaSelector) DeepCopyInto(out *ClusterResourceQuotaSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.AnnotationSelector != nil {
+ in, out := &in.AnnotationSelector, &out.AnnotationSelector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSelector.
+func (in *ClusterResourceQuotaSelector) DeepCopy() *ClusterResourceQuotaSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterResourceQuotaSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceQuotaSpec) DeepCopyInto(out *ClusterResourceQuotaSpec) {
+ *out = *in
+ in.Selector.DeepCopyInto(&out.Selector)
+ in.Quota.DeepCopyInto(&out.Quota)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSpec.
+func (in *ClusterResourceQuotaSpec) DeepCopy() *ClusterResourceQuotaSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterResourceQuotaSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterResourceQuotaStatus) DeepCopyInto(out *ClusterResourceQuotaStatus) {
+ *out = *in
+ in.Total.DeepCopyInto(&out.Total)
+ if in.Namespaces != nil {
+ in, out := &in.Namespaces, &out.Namespaces
+ *out = make(ResourceQuotasStatusByNamespace, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaStatus.
+func (in *ClusterResourceQuotaStatus) DeepCopy() *ClusterResourceQuotaStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClusterResourceQuotaStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace.
+func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceQuotaStatusByNamespace)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) {
+ {
+ in := &in
+ *out = make(ResourceQuotasStatusByNamespace, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace.
+func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace {
+ if in == nil {
+ return nil
+ }
+ out := new(ResourceQuotasStatusByNamespace)
+ in.DeepCopyInto(out)
+ return *out
+}
diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..50caedde1c
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,21 @@
+clusterresourcequotas.quota.openshift.io:
+ Annotations: {}
+ ApprovedPRNumber: https://github.com/openshift/api/pull/470
+ CRDName: clusterresourcequotas.quota.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: config-operator
+ FilenameOperatorOrdering: "01"
+ FilenameRunLevel: "0000_03"
+ GroupName: quota.openshift.io
+ HasStatus: true
+ KindName: ClusterResourceQuota
+ Labels: {}
+ PluralName: clusterresourcequotas
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..3072671c53
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,96 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AppliedClusterResourceQuota = map[string]string{
+ "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the desired quota",
+ "status": "Status defines the actual enforced quota and its current usage",
+}
+
+func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string {
+ return map_AppliedClusterResourceQuota
+}
+
+var map_AppliedClusterResourceQuotaList = map[string]string{
+ "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of AppliedClusterResourceQuota",
+}
+
+func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string {
+ return map_AppliedClusterResourceQuotaList
+}
+
+var map_ClusterResourceQuota = map[string]string{
+ "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the desired quota",
+ "status": "Status defines the actual enforced quota and its current usage",
+}
+
+func (ClusterResourceQuota) SwaggerDoc() map[string]string {
+ return map_ClusterResourceQuota
+}
+
+var map_ClusterResourceQuotaList = map[string]string{
+ "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of ClusterResourceQuotas",
+}
+
+func (ClusterResourceQuotaList) SwaggerDoc() map[string]string {
+ return map_ClusterResourceQuotaList
+}
+
+var map_ClusterResourceQuotaSelector = map[string]string{
+ "": "ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector must present. If only one is present, it is the only selection criteria. If both are specified, the project must match both restrictions.",
+ "labels": "LabelSelector is used to select projects by label.",
+ "annotations": "AnnotationSelector is used to select projects by annotation.",
+}
+
+func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string {
+ return map_ClusterResourceQuotaSelector
+}
+
+var map_ClusterResourceQuotaSpec = map[string]string{
+ "": "ClusterResourceQuotaSpec defines the desired quota restrictions",
+ "selector": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.",
+ "quota": "Quota defines the desired quota",
+}
+
+func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string {
+ return map_ClusterResourceQuotaSpec
+}
+
+var map_ClusterResourceQuotaStatus = map[string]string{
+ "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage",
+ "total": "Total defines the actual enforced quota and its current usage across all projects",
+ "namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.",
+}
+
+func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string {
+ return map_ClusterResourceQuotaStatus
+}
+
+var map_ResourceQuotaStatusByNamespace = map[string]string{
+ "": "ResourceQuotaStatusByNamespace gives status for a particular project",
+ "namespace": "Namespace the project this status applies to",
+ "status": "Status indicates how many resources have been consumed by this project",
+}
+
+func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string {
+ return map_ResourceQuotaStatusByNamespace
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/route/.codegen.yaml b/vendor/github.com/openshift/api/route/.codegen.yaml
new file mode 100644
index 0000000000..65cf5d814b
--- /dev/null
+++ b/vendor/github.com/openshift/api/route/.codegen.yaml
@@ -0,0 +1,3 @@
+schemapatch:
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/route/OWNERS b/vendor/github.com/openshift/api/route/OWNERS
new file mode 100644
index 0000000000..74038975d3
--- /dev/null
+++ b/vendor/github.com/openshift/api/route/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+ - ironcladlou
+ - knobunc
+ - pravisankar
+ - Miciah
diff --git a/vendor/github.com/openshift/api/route/install.go b/vendor/github.com/openshift/api/route/install.go
new file mode 100644
index 0000000000..a08536283b
--- /dev/null
+++ b/vendor/github.com/openshift/api/route/install.go
@@ -0,0 +1,26 @@
+package route
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ routev1 "github.com/openshift/api/route/v1"
+)
+
+const (
+ GroupName = "route.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(routev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/samples/.codegen.yaml b/vendor/github.com/openshift/api/samples/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/samples/install.go b/vendor/github.com/openshift/api/samples/install.go
new file mode 100644
index 0000000000..8ad4d81978
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/install.go
@@ -0,0 +1,26 @@
+package samples
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ samplesv1 "github.com/openshift/api/samples/v1"
+)
+
+const (
+ GroupName = "samples.operator.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(samplesv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/Makefile b/vendor/github.com/openshift/api/samples/v1/Makefile
new file mode 100644
index 0000000000..be24ecca02
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="samples.operator.openshift.io/v1"
diff --git a/vendor/github.com/openshift/api/samples/v1/doc.go b/vendor/github.com/openshift/api/samples/v1/doc.go
new file mode 100644
index 0000000000..d63c96b778
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=samples.operator.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/samples/v1/generated.pb.go b/vendor/github.com/openshift/api/samples/v1/generated.pb.go
new file mode 100644
index 0000000000..2459626ce1
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/generated.pb.go
@@ -0,0 +1,1897 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/samples/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_openshift_api_operator_v1 "github.com/openshift/api/operator/v1"
+
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Config) Reset() { *m = Config{} }
+func (*Config) ProtoMessage() {}
+func (*Config) Descriptor() ([]byte, []int) {
+ return fileDescriptor_67d62912ac03ce1e, []int{0}
+}
+func (m *Config) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Config) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Config.Merge(m, src)
+}
+func (m *Config) XXX_Size() int {
+ return m.Size()
+}
+func (m *Config) XXX_DiscardUnknown() {
+ xxx_messageInfo_Config.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Config proto.InternalMessageInfo
+
+func (m *ConfigCondition) Reset() { *m = ConfigCondition{} }
+func (*ConfigCondition) ProtoMessage() {}
+func (*ConfigCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_67d62912ac03ce1e, []int{1}
+}
+func (m *ConfigCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ConfigCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigCondition.Merge(m, src)
+}
+func (m *ConfigCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigCondition proto.InternalMessageInfo
+
+func (m *ConfigList) Reset() { *m = ConfigList{} }
+func (*ConfigList) ProtoMessage() {}
+func (*ConfigList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_67d62912ac03ce1e, []int{2}
+}
+func (m *ConfigList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ConfigList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigList.Merge(m, src)
+}
+func (m *ConfigList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigList proto.InternalMessageInfo
+
+func (m *ConfigSpec) Reset() { *m = ConfigSpec{} }
+func (*ConfigSpec) ProtoMessage() {}
+func (*ConfigSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_67d62912ac03ce1e, []int{3}
+}
+func (m *ConfigSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ConfigSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigSpec.Merge(m, src)
+}
+func (m *ConfigSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo
+
+func (m *ConfigStatus) Reset() { *m = ConfigStatus{} }
+func (*ConfigStatus) ProtoMessage() {}
+func (*ConfigStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_67d62912ac03ce1e, []int{4}
+}
+func (m *ConfigStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ConfigStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfigStatus.Merge(m, src)
+}
+func (m *ConfigStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfigStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigStatus proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Config)(nil), "github.com.openshift.api.samples.v1.Config")
+ proto.RegisterType((*ConfigCondition)(nil), "github.com.openshift.api.samples.v1.ConfigCondition")
+ proto.RegisterType((*ConfigList)(nil), "github.com.openshift.api.samples.v1.ConfigList")
+ proto.RegisterType((*ConfigSpec)(nil), "github.com.openshift.api.samples.v1.ConfigSpec")
+ proto.RegisterType((*ConfigStatus)(nil), "github.com.openshift.api.samples.v1.ConfigStatus")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/samples/v1/generated.proto", fileDescriptor_67d62912ac03ce1e)
+}
+
+var fileDescriptor_67d62912ac03ce1e = []byte{
+ // 843 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x96, 0xcd, 0x6e, 0x23, 0x45,
+ 0x10, 0xc7, 0xed, 0xf8, 0x2b, 0xdb, 0xbb, 0x89, 0x93, 0x0e, 0x62, 0x47, 0x39, 0xcc, 0xac, 0x1c,
+ 0x69, 0x95, 0x05, 0xd1, 0x43, 0x96, 0x88, 0xe5, 0xc8, 0x3a, 0x17, 0x22, 0x25, 0x2c, 0x74, 0x0c,
+ 0x12, 0x88, 0x03, 0x9d, 0x71, 0x65, 0xdc, 0x9b, 0xcc, 0x87, 0xa6, 0xdb, 0x96, 0x7c, 0xe3, 0x11,
+ 0x38, 0xf2, 0x06, 0xbc, 0x04, 0x0f, 0x90, 0x1b, 0x7b, 0xdc, 0xd3, 0x88, 0x0c, 0x12, 0x0f, 0x91,
+ 0x13, 0xea, 0x9e, 0x9e, 0x89, 0xbf, 0x56, 0xd8, 0x0a, 0xd2, 0xde, 0xdc, 0x55, 0xf5, 0xff, 0x55,
+ 0xcd, 0xf4, 0xdf, 0x65, 0xa3, 0xcf, 0x7c, 0x2e, 0x07, 0xc3, 0x73, 0xe2, 0x45, 0x81, 0x1b, 0xc5,
+ 0x10, 0x8a, 0x01, 0xbf, 0x90, 0x2e, 0x8b, 0xb9, 0x2b, 0x58, 0x10, 0x5f, 0x81, 0x70, 0x47, 0x07,
+ 0xae, 0x0f, 0x21, 0x24, 0x4c, 0x42, 0x9f, 0xc4, 0x49, 0x24, 0x23, 0xbc, 0x77, 0x27, 0x22, 0xa5,
+ 0x88, 0xb0, 0x98, 0x13, 0x23, 0x22, 0xa3, 0x83, 0xdd, 0x4f, 0x26, 0xc8, 0x7e, 0xe4, 0x47, 0xae,
+ 0xd6, 0x9e, 0x0f, 0x2f, 0xf4, 0x49, 0x1f, 0xf4, 0xa7, 0x9c, 0xb9, 0xdb, 0xb9, 0xfc, 0x42, 0x10,
+ 0x1e, 0xe9, 0xd6, 0x5e, 0x94, 0xc0, 0x82, 0xbe, 0xbb, 0x87, 0x77, 0x35, 0x01, 0xf3, 0x06, 0x3c,
+ 0x84, 0x64, 0xec, 0xc6, 0x97, 0xbe, 0x0a, 0x08, 0x37, 0x00, 0xc9, 0x16, 0xa9, 0xdc, 0x77, 0xa9,
+ 0x92, 0x61, 0x28, 0x79, 0x00, 0x73, 0x82, 0xcf, 0xff, 0x4b, 0x20, 0xbc, 0x01, 0x04, 0x6c, 0x56,
+ 0xd7, 0xf9, 0x6d, 0x0d, 0x35, 0x8f, 0xa2, 0xf0, 0x82, 0xfb, 0xf8, 0x67, 0xb4, 0xae, 0xc6, 0xe9,
+ 0x33, 0xc9, 0xac, 0xea, 0x93, 0xea, 0xfe, 0xc3, 0xe7, 0x9f, 0x92, 0x9c, 0x4a, 0x26, 0xa9, 0x24,
+ 0xbe, 0xf4, 0x55, 0x40, 0x10, 0x55, 0x4d, 0x46, 0x07, 0xe4, 0xd5, 0xf9, 0x6b, 0xf0, 0xe4, 0x29,
+ 0x48, 0xd6, 0xc5, 0xd7, 0xa9, 0x53, 0xc9, 0x52, 0x07, 0xdd, 0xc5, 0x68, 0x49, 0xc5, 0xdf, 0xa2,
+ 0xba, 0x88, 0xc1, 0xb3, 0xd6, 0x34, 0xdd, 0x25, 0x4b, 0x5c, 0x09, 0xc9, 0x87, 0x3b, 0x8b, 0xc1,
+ 0xeb, 0x3e, 0x32, 0xf0, 0xba, 0x3a, 0x51, 0x8d, 0xc2, 0x3f, 0xa0, 0xa6, 0x90, 0x4c, 0x0e, 0x85,
+ 0x55, 0xd3, 0xd0, 0x83, 0x55, 0xa0, 0x5a, 0xd8, 0xdd, 0x34, 0xd8, 0x66, 0x7e, 0xa6, 0x06, 0xd8,
+ 0xf9, 0xb3, 0x86, 0xda, 0x79, 0xe1, 0x51, 0x14, 0xf6, 0xb9, 0xe4, 0x51, 0x88, 0x5f, 0xa0, 0xba,
+ 0x1c, 0xc7, 0xa0, 0xdf, 0xcf, 0x83, 0xee, 0x5e, 0x31, 0x50, 0x6f, 0x1c, 0xc3, 0x6d, 0xea, 0xec,
+ 0xcc, 0x94, 0xab, 0x30, 0xd5, 0x02, 0x7c, 0x52, 0xce, 0xb9, 0xa6, 0xa5, 0x87, 0xd3, 0x4d, 0x6f,
+ 0x53, 0x67, 0x81, 0x99, 0x48, 0x49, 0x9a, 0x1e, 0x0d, 0xbf, 0x46, 0x9b, 0x57, 0x4c, 0xc8, 0xef,
+ 0xe2, 0x3e, 0x93, 0xd0, 0xe3, 0x01, 0x98, 0xa7, 0xff, 0x68, 0xb9, 0x0b, 0x53, 0x8a, 0xee, 0x87,
+ 0x66, 0x82, 0xcd, 0x93, 0x29, 0x12, 0x9d, 0x21, 0xe3, 0x11, 0xc2, 0x2a, 0xd2, 0x4b, 0x58, 0x28,
+ 0xf2, 0xa7, 0x52, 0xfd, 0xea, 0x2b, 0xf7, 0xdb, 0x35, 0xfd, 0xf0, 0xc9, 0x1c, 0x8d, 0x2e, 0xe8,
+ 0x80, 0x9f, 0xa2, 0x66, 0x02, 0x4c, 0x44, 0xa1, 0xd5, 0xd0, 0x6f, 0xac, 0xbc, 0x26, 0xaa, 0xa3,
+ 0xd4, 0x64, 0xf1, 0x33, 0xd4, 0x0a, 0x40, 0x08, 0xe6, 0x83, 0xd5, 0xd4, 0x85, 0x6d, 0x53, 0xd8,
+ 0x3a, 0xcd, 0xc3, 0xb4, 0xc8, 0x77, 0xfe, 0xa8, 0x22, 0x94, 0x5f, 0xd1, 0x09, 0x17, 0x12, 0xff,
+ 0x34, 0x67, 0x78, 0xb2, 0xdc, 0xf3, 0x28, 0xb5, 0xb6, 0xfb, 0x96, 0x69, 0xb5, 0x5e, 0x44, 0x26,
+ 0xcc, 0xfe, 0x0d, 0x6a, 0x70, 0x09, 0x81, 0xba, 0xf0, 0xda, 0xfe, 0xc3, 0xe7, 0x1f, 0xaf, 0x60,
+ 0xcc, 0xee, 0x86, 0xe1, 0x36, 0x8e, 0x15, 0x81, 0xe6, 0xa0, 0xce, 0x3f, 0xb5, 0x62, 0x7c, 0xf5,
+ 0x05, 0xc0, 0x63, 0xd4, 0x0e, 0x58, 0xc8, 0x7c, 0x08, 0x20, 0x94, 0xca, 0x20, 0x85, 0x2d, 0x5f,
+ 0x19, 0x75, 0xfb, 0x74, 0x3a, 0x7d, 0x9b, 0x3a, 0x87, 0xef, 0x5c, 0x9d, 0x51, 0xac, 0xf6, 0x42,
+ 0x94, 0x28, 0xdb, 0xcd, 0xe8, 0xe8, 0x6c, 0x1f, 0xfc, 0x12, 0xb5, 0xcd, 0xd0, 0x14, 0x7c, 0x2e,
+ 0x64, 0x32, 0x36, 0xb6, 0x7e, 0x5c, 0xb4, 0x3e, 0x9b, 0x4e, 0xd3, 0xd9, 0x7a, 0xfc, 0x02, 0x6d,
+ 0xb0, 0xc4, 0x1b, 0x70, 0x09, 0x9e, 0x1c, 0x26, 0x20, 0xac, 0xfa, 0x93, 0xda, 0xfe, 0x83, 0xee,
+ 0x76, 0x96, 0x3a, 0x1b, 0x2f, 0x27, 0x13, 0x74, 0xba, 0x0e, 0x1f, 0xa3, 0x1d, 0x71, 0xc9, 0xe3,
+ 0x18, 0xfa, 0xc7, 0x01, 0xf3, 0x41, 0xc8, 0x04, 0x58, 0x20, 0xac, 0x86, 0x96, 0x3f, 0xce, 0x52,
+ 0x67, 0xe7, 0x6c, 0x3e, 0x4d, 0x17, 0x69, 0xf0, 0x97, 0x68, 0xcb, 0x84, 0x7b, 0x10, 0xc4, 0x57,
+ 0x4c, 0x82, 0xb0, 0x9a, 0x9a, 0xf3, 0x41, 0x96, 0x3a, 0x5b, 0x67, 0x33, 0x39, 0x3a, 0x57, 0x8d,
+ 0x7b, 0x68, 0xdb, 0xc4, 0x06, 0x70, 0x15, 0x1c, 0x0d, 0x58, 0x22, 0x85, 0xd5, 0xd2, 0x88, 0xa7,
+ 0x59, 0xea, 0x6c, 0x1b, 0xc4, 0x57, 0x65, 0xf2, 0x36, 0x75, 0x36, 0xca, 0xd3, 0xd7, 0x2c, 0x00,
+ 0x3a, 0x0f, 0xe8, 0xfc, 0x5e, 0x47, 0x8f, 0x26, 0x57, 0xd4, 0xfb, 0xbc, 0xea, 0x01, 0x42, 0x5e,
+ 0xb1, 0x85, 0x0a, 0x2f, 0x1f, 0xae, 0xe0, 0xe5, 0x72, 0x85, 0xdd, 0xfd, 0x36, 0x94, 0x21, 0x41,
+ 0x27, 0xd8, 0x8b, 0x4c, 0x55, 0xbb, 0xaf, 0xa9, 0x1a, 0xf7, 0x33, 0x55, 0xf3, 0x7f, 0x32, 0x55,
+ 0x6b, 0x25, 0x53, 0x3d, 0x43, 0xad, 0x11, 0x24, 0x82, 0x47, 0xa1, 0xb5, 0x3e, 0xbd, 0xd1, 0xbe,
+ 0xcf, 0xc3, 0xb4, 0xc8, 0x77, 0x8f, 0xaf, 0x6f, 0xec, 0xca, 0x9b, 0x1b, 0xbb, 0xf2, 0xf6, 0xc6,
+ 0xae, 0xfc, 0x92, 0xd9, 0xd5, 0xeb, 0xcc, 0xae, 0xbe, 0xc9, 0xec, 0xea, 0xdb, 0xcc, 0xae, 0xfe,
+ 0x95, 0xd9, 0xd5, 0x5f, 0xff, 0xb6, 0x2b, 0x3f, 0xee, 0x2d, 0xf1, 0x87, 0xe9, 0xdf, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x92, 0x4a, 0x67, 0x37, 0x56, 0x09, 0x00, 0x00,
+}
+
+func (m *Config) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Config) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x2a
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.SkippedHelmCharts) > 0 {
+ for iNdEx := len(m.SkippedHelmCharts) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SkippedHelmCharts[iNdEx])
+ copy(dAtA[i:], m.SkippedHelmCharts[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedHelmCharts[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.SkippedTemplates) > 0 {
+ for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SkippedTemplates[iNdEx])
+ copy(dAtA[i:], m.SkippedTemplates[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.SkippedImagestreams) > 0 {
+ for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SkippedImagestreams[iNdEx])
+ copy(dAtA[i:], m.SkippedImagestreams[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Architectures) > 0 {
+ for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Architectures[iNdEx])
+ copy(dAtA[i:], m.Architectures[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.SamplesRegistry)
+ copy(dAtA[i:], m.SamplesRegistry)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.ManagementState)
+ copy(dAtA[i:], m.ManagementState)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x42
+ if len(m.SkippedTemplates) > 0 {
+ for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SkippedTemplates[iNdEx])
+ copy(dAtA[i:], m.SkippedTemplates[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.SkippedImagestreams) > 0 {
+ for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.SkippedImagestreams[iNdEx])
+ copy(dAtA[i:], m.SkippedImagestreams[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx])))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.Architectures) > 0 {
+ for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Architectures[iNdEx])
+ copy(dAtA[i:], m.Architectures[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ i -= len(m.SamplesRegistry)
+ copy(dAtA[i:], m.SamplesRegistry)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry)))
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.ManagementState)
+ copy(dAtA[i:], m.ManagementState)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Config) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ConfigCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ConfigList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ConfigSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ManagementState)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SamplesRegistry)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Architectures) > 0 {
+ for _, s := range m.Architectures {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.SkippedImagestreams) > 0 {
+ for _, s := range m.SkippedImagestreams {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.SkippedTemplates) > 0 {
+ for _, s := range m.SkippedTemplates {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.SkippedHelmCharts) > 0 {
+ for _, s := range m.SkippedHelmCharts {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ConfigStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ManagementState)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.SamplesRegistry)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Architectures) > 0 {
+ for _, s := range m.Architectures {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.SkippedImagestreams) > 0 {
+ for _, s := range m.SkippedImagestreams {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.SkippedTemplates) > 0 {
+ for _, s := range m.SkippedTemplates {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Config) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Config{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ConfigStatus", "ConfigStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ConfigCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ConfigCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ConfigList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Config{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Config", "Config", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ConfigList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ConfigSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ConfigSpec{`,
+ `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`,
+ `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`,
+ `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`,
+ `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`,
+ `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`,
+ `SkippedHelmCharts:` + fmt.Sprintf("%v", this.SkippedHelmCharts) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ConfigStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]ConfigCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ConfigCondition", "ConfigCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ConfigStatus{`,
+ `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`,
+ `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`,
+ `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`,
+ `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Config) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Config: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfigCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = ConfigConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes the protobuf wire encoding in dAtA into m, returning an
+// error on truncated or malformed input. Generated by go-to-protobuf/gogo;
+// the comments below only annotate the generated decode loop.
+func (m *ConfigList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ // Read the key varint: field number in the high bits, wire type in the low 3.
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ // Dispatch on field number; the schema is in generated.proto (ConfigList).
+ switch fieldNum {
+ case 1:
+ // Field 1: metadata (ListMeta), length-delimited message.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ // Field 2: items (repeated Config), one length-delimited message per entry.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Config{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field: skip it while validating lengths, preserving forward compatibility.
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes the protobuf wire encoding in dAtA into m, returning an
+// error on truncated or malformed input. Generated by go-to-protobuf/gogo;
+// the comments below only annotate the generated decode loop.
+func (m *ConfigSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ // Read the key varint: field number in the high bits, wire type in the low 3.
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ // Dispatch on field number; field 3 is intentionally absent (see generated.proto).
+ switch fieldNum {
+ case 1:
+ // Field 1: managementState, stored as the typed string ManagementState.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ManagementState = github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ // Field 2: samplesRegistry (string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SamplesRegistry = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ // Field 4: architectures (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ // Field 5: skippedImagestreams (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkippedImagestreams", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ // Field 6: skippedTemplates (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 7:
+ // Field 7: skippedHelmCharts (repeated string), stored as typed HelmChartName values.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkippedHelmCharts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SkippedHelmCharts = append(m.SkippedHelmCharts, HelmChartName(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ // Unknown field: skip it while validating lengths, preserving forward compatibility.
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes the protobuf wire encoding in dAtA into m, returning an
+// error on truncated or malformed input. Generated by go-to-protobuf/gogo;
+// the comments below only annotate the generated decode loop.
+func (m *ConfigStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ // Read the key varint: field number in the high bits, wire type in the low 3.
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfigStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ // Dispatch on field number; field 4 is intentionally absent (see generated.proto).
+ switch fieldNum {
+ case 1:
+ // Field 1: managementState, stored as the typed string ManagementState.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ManagementState = github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ // Field 2: conditions (repeated ConfigCondition), one length-delimited message per entry.
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, ConfigCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ // Field 3: samplesRegistry (string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SamplesRegistry = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ // Field 5: architectures (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 6:
+ // Field 6: skippedImagestreams (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkippedImagestreams", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 7:
+ // Field 7: skippedTemplates (repeated string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 8:
+ // Field 8: version (string).
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ // Unknown field: skip it while validating lengths, preserving forward compatibility.
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// skipGenerated returns the number of bytes occupied by the next complete
+// field (including nested groups) at the start of dAtA, so callers can skip
+// unknown fields. It validates varint and length bounds as it scans.
+// Generated by go-to-protobuf/gogo.
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ // Read the field key varint to learn the wire type.
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ // Varint: consume bytes until the continuation bit clears.
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ // Fixed64: always eight bytes.
+ iNdEx += 8
+ case 2:
+ // Length-delimited: read the length varint, then skip that many bytes.
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ // Start-group: track nesting so we only return at depth zero.
+ depth++
+ case 4:
+ // End-group: must match a preceding start-group.
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ // Fixed32: always four bytes.
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+// Sentinel errors returned by the generated unmarshaling code in this file.
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto
new file mode 100644
index 0000000000..28bbf75126
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/generated.proto
@@ -0,0 +1,182 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.samples.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/samples/v1";
+
+// Config contains the configuration and detailed condition status for the Samples Operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:path=configs,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/513
+// +openshift:file-pattern=operatorOrdering=00
+// +kubebuilder:metadata:annotations="description=Extension for configuring openshift samples operator."
+// +kubebuilder:metadata:annotations="displayName=ConfigsSamples"
+message Config {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the desired configuration for the Samples Operator.
+ // +kubebuilder:validation:Required
+ // +required
+ optional ConfigSpec spec = 2;
+
+ // status is the most recently observed state of the Samples Operator.
+ // +optional
+ optional ConfigStatus status = 3;
+}
+
+// ConfigCondition captures various conditions of the Config
+// as entries are processed. It mirrors the Go ConfigCondition type
+// in types_config.go of this package.
+message ConfigCondition {
+ // type of condition.
+ optional string type = 1;
+
+ // status of the condition, one of True, False, Unknown.
+ optional string status = 2;
+
+ // lastUpdateTime is the last time this condition was updated.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3;
+
+ // lastTransitionTime is the last time the condition transitioned from one status to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+ // reason is what caused the condition's last transition.
+ optional string reason = 5;
+
+ // message is a human readable message indicating details about the transition.
+ optional string message = 6;
+}
+
+// ConfigList is a list of Config resources.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ConfigList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of Config objects.
+ repeated Config items = 2;
+}
+
+// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling
+// various behavior around the imagestreams and templates it creates/updates in the
+// openshift namespace.
+// ConfigSpec contains the desired configuration and state for the Samples
+// Operator, controlling various behavior around the imagestreams and
+// templates it creates/updates in the openshift namespace.
+message ConfigSpec {
+ // managementState is top level on/off type of switch for all operators.
+ // When "Managed", this operator processes config and manipulates the samples accordingly.
+ // When "Unmanaged", this operator ignores any updates to the resources it watches.
+ // When "Removed", it reacts the same way as it does if the Config object
+ // is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped
+ // lists) and the registry secret are deleted, along with the ConfigMap in the operator's
+ // namespace that represents the last config used to manipulate the samples.
+ optional string managementState = 1;
+
+ // samplesRegistry allows for the specification of which registry is accessed
+ // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library
+ // that are pulled into this github repository, but based on our pulling only ocp content it typically
+ // defaults to registry.redhat.io.
+ optional string samplesRegistry = 2;
+
+ // architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only
+ // supported choices currently.
+ repeated string architectures = 4;
+
+ // skippedImagestreams specifies names of image streams that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ repeated string skippedImagestreams = 5;
+
+ // skippedTemplates specifies names of templates that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ repeated string skippedTemplates = 6;
+
+ // skippedHelmCharts specifies names of helm charts that should NOT be
+ // managed. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to MANUALLY DELETE the
+ // content but the operator will not recreate(or update) anything
+ // listed here. Few examples of the name of helmcharts which can be skipped are
+ // 'redhat-redhat-perl-imagestreams','redhat-redhat-nodejs-imagestreams','redhat-nginx-imagestreams',
+ // 'redhat-redhat-ruby-imagestreams','redhat-redhat-python-imagestreams','redhat-redhat-php-imagestreams',
+ // 'redhat-httpd-imagestreams','redhat-redhat-dotnet-imagestreams'. Rest of the names can be obtained from
+ // openshift console --> helmcharts -->installed helmcharts. This will display the list of all the
+ // 12 helmcharts(of imagestreams)being installed by Samples Operator. The skippedHelmCharts must be a
+ // valid Kubernetes resource name. May contain only lowercase alphanumeric characters, hyphens and periods,
+ // and each period separated segment must begin and end with an alphanumeric character. It must be non-empty
+ // and at most 253 characters in length
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'))",message="skippedHelmCharts must be a valid Kubernetes resource name. May contain only lowercase alphanumeric characters, hyphens and periods, and each period separated segment must begin and end with an alphanumeric character"
+ // Field name fixed to lowerCamel "skippedHelmCharts" for consistency with the
+ // sibling fields, this field's own doc comment, and the Go SkippedHelmCharts
+ // decoder; the wire format is unaffected (field number/type unchanged).
+ repeated string skippedHelmCharts = 7;
+}
+
+// ConfigStatus contains the actual configuration in effect, as well as various details
+// that describe the state of the Samples Operator.
+// NOTE(review): the +patchMergeKey/+patchStrategy markers below appear on
+// scalar/string fields, where strategic-merge keys normally apply to object
+// lists; they are carried over from the upstream Go types — confirm against
+// the vendored source before changing.
+message ConfigStatus {
+ // managementState reflects the current operational status of the on/off switch for
+ // the operator. This operator compares the ManagementState as part of determining that we are turning
+ // the operator back on (i.e. "Managed") when it was previously "Unmanaged".
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ optional string managementState = 1;
+
+ // conditions represents the available maintenance status of the sample
+ // imagestreams and templates.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ConfigCondition conditions = 2;
+
+ // samplesRegistry allows for the specification of which registry is accessed
+ // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library
+ // that are pulled into this github repository, but based on our pulling only ocp content it typically
+ // defaults to registry.redhat.io.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ optional string samplesRegistry = 3;
+
+ // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the
+ // supported choices.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated string architectures = 5;
+
+ // skippedImagestreams specifies names of image streams that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated string skippedImagestreams = 6;
+
+ // skippedTemplates specifies names of templates that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated string skippedTemplates = 7;
+
+ // version is the value of the operator's payload based version indicator when it was last successfully processed
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ optional string version = 8;
+}
+
diff --git a/vendor/github.com/openshift/api/samples/v1/register.go b/vendor/github.com/openshift/api/samples/v1/register.go
new file mode 100644
index 0000000000..3b0611e3f8
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/register.go
@@ -0,0 +1,51 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ // Version is the API version of the samples Config resource.
+ Version = "v1"
+ // GroupName is the API group for the Samples Operator configuration.
+ GroupName = "samples.operator.openshift.io"
+)
+
+var (
+ // scheme is a package-local scheme populated in init; not exported.
+ scheme = runtime.NewScheme()
+ // GroupVersion identifies the samples.operator.openshift.io/v1 API.
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
+ // SchemeBuilder collects the functions that register this package's types.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// init registers this package's types into the package-local scheme.
+func init() {
+ // Return value deliberately ignored: addKnownTypes (the only registered
+ // scheme function) always returns nil.
+ AddToScheme(scheme)
+}
+
+// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+// It registers Config and ConfigList under SchemeGroupVersion and adds the
+// meta/v1 types for that group version; it always returns nil.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Config{},
+ &ConfigList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind,
+// e.g. Kind("Config") -> {Group: "samples.operator.openshift.io", Kind: "Config"}.
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource,
+// e.g. Resource("configs") -> {Group: "samples.operator.openshift.io", Resource: "configs"}.
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go
new file mode 100644
index 0000000000..c4bf380439
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/types_config.go
@@ -0,0 +1,271 @@
+package v1
+
+import (
+ operatorv1 "github.com/openshift/api/operator/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Config contains the configuration and detailed condition status for the Samples Operator.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:path=configs,scope=Cluster
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/513
+// +openshift:file-pattern=operatorOrdering=00
+// +kubebuilder:metadata:annotations="description=Extension for configuring openshift samples operator."
+// +kubebuilder:metadata:annotations="displayName=ConfigsSamples"
+type Config struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // +optional
+ Status ConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling
+// various behavior around the imagestreams and templates it creates/updates in the
+// openshift namespace.
+type ConfigSpec struct {
+ // managementState is top level on/off type of switch for all operators.
+ // When "Managed", this operator processes config and manipulates the samples accordingly.
+ // When "Unmanaged", this operator ignores any updates to the resources it watches.
+ // When "Removed", it reacts the same way as it does if the Config object
+ // is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped
+ // lists) and the registry secret are deleted, along with the ConfigMap in the operator's
+ // namespace that represents the last config used to manipulate the samples.
+ ManagementState operatorv1.ManagementState `json:"managementState,omitempty" protobuf:"bytes,1,opt,name=managementState"`
+
+ // samplesRegistry allows for the specification of which registry is accessed
+ // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library
+ // that are pulled into this github repository, but based on our pulling only ocp content it typically
+ // defaults to registry.redhat.io.
+ SamplesRegistry string `json:"samplesRegistry,omitempty" protobuf:"bytes,2,opt,name=samplesRegistry"`
+
+ // architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only
+ // supported choices currently.
+ Architectures []string `json:"architectures,omitempty" protobuf:"bytes,4,opt,name=architectures"`
+
+ // skippedImagestreams specifies names of image streams that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ SkippedImagestreams []string `json:"skippedImagestreams,omitempty" protobuf:"bytes,5,opt,name=skippedImagestreams"`
+
+ // skippedTemplates specifies names of templates that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ SkippedTemplates []string `json:"skippedTemplates,omitempty" protobuf:"bytes,6,opt,name=skippedTemplates"`
+
+ // skippedHelmCharts specifies names of helm charts that should NOT be
+ // managed. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to MANUALLY DELETE the
+ // content but the operator will not recreate(or update) anything
+ // listed here. Few examples of the name of helmcharts which can be skipped are
+ // 'redhat-redhat-perl-imagestreams','redhat-redhat-nodejs-imagestreams','redhat-nginx-imagestreams',
+ // 'redhat-redhat-ruby-imagestreams','redhat-redhat-python-imagestreams','redhat-redhat-php-imagestreams',
+ // 'redhat-httpd-imagestreams','redhat-redhat-dotnet-imagestreams'. Rest of the names can be obtained from
+ // openshift console --> helmcharts -->installed helmcharts. This will display the list of all the
+ // 12 helmcharts(of imagestreams)being installed by Samples Operator. The skippedHelmCharts must be a
+ // valid Kubernetes resource name. May contain only lowercase alphanumeric characters, hyphens and periods,
+ // and each period separated segment must begin and end with an alphanumeric character. It must be non-empty
+ // and at most 253 characters in length
+ // +listType=set
+ // +kubebuilder:validation:MaxItems=16
+ // +kubebuilder:validation:XValidation:rule="self.all(x, x.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'))",message="skippedHelmCharts must be a valid Kubernetes resource name. May contain only lowercase alphanumeric characters, hyphens and periods, and each period separated segment must begin and end with an alphanumeric character"
+ SkippedHelmCharts []HelmChartName `json:"skippedHelmCharts,omitempty" protobuf:"bytes,7,opt,name=skippedhelmCharts"`
+}
+
+// HelmChartName is a string alias that is used to represent the name of a helm chart.
+// +kubebuilder:validation:MinLength=1
+// +kubebuilder:validation:MaxLength=253
+type HelmChartName string
+
+// ConfigStatus contains the actual configuration in effect, as well as various details
+// that describe the state of the Samples Operator.
+type ConfigStatus struct {
+ // managementState reflects the current operational status of the on/off switch for
+ // the operator. This operator compares the ManagementState as part of determining that we are turning
+ // the operator back on (i.e. "Managed") when it was previously "Unmanaged".
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ ManagementState operatorv1.ManagementState `json:"managementState,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=managementState"`
+ // conditions represents the available maintenance status of the sample
+ // imagestreams and templates.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []ConfigCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+
+ // samplesRegistry allows for the specification of which registry is accessed
+ // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library
+ // that are pulled into this github repository, but based on our pulling only ocp content it typically
+ // defaults to registry.redhat.io.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ SamplesRegistry string `json:"samplesRegistry,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=samplesRegistry"`
+
+ // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the
+ // supported choices.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Architectures []string `json:"architectures,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=architectures"`
+
+ // skippedImagestreams specifies names of image streams that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ SkippedImagestreams []string `json:"skippedImagestreams,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=skippedImagestreams"`
+
+ // skippedTemplates specifies names of templates that should NOT be
+ // created/updated. Admins can use this to allow them to delete content
+ // they don’t want. They will still have to manually delete the
+ // content but the operator will not recreate(or update) anything
+ // listed here.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ SkippedTemplates []string `json:"skippedTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,7,rep,name=skippedTemplates"`
+
+ // version is the value of the operator's payload based version indicator when it was last successfully processed
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Version string `json:"version,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=version"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+ Items []Config `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+ // SamplesRegistryCredentials is the name for a secret that contains a username+password/token
+ // for the registry, where if the secret is present, will be used for authentication.
+ // The corresponding secret is required to already be formatted as a
+ // dockerconfig secret so that it can just be copied
+ // to the openshift namespace
+ // for use during imagestream import.
+ SamplesRegistryCredentials = "samples-registry-credentials"
+ // ConfigName is the name/identifier of the static, singleton operator employed for the samples.
+ ConfigName = "cluster"
+ // X86Architecture is the value used to specify the x86_64 hardware architecture
+ // in the Architectures array field.
+ X86Architecture = "x86_64"
+ // AMDArchitecture is the golang value for x86 64 bit hardware architecture; for the purposes
+ // of this operator, it is equivalent to X86Architecture, which is kept for historical/migration
+ // purposes
+ AMDArchitecture = "amd64"
+ // ARMArchitecture is the value used to specify the aarch64 hardware architecture
+ // in the Architectures array field.
+ ARMArchitecture = "arm64"
+ // PPCArchitecture is the value used to specify the ppc64le hardware architecture
+ // in the Architectures array field.
+ PPCArchitecture = "ppc64le"
+ // S390Architecture is the value used to specify the s390x hardware architecture
+ // in the Architecture array field.
+ S390Architecture = "s390x"
+ // ConfigFinalizer is the text added to the Config.Finalizer field
+ // to enable finalizer processing.
+ ConfigFinalizer = GroupName + "/finalizer"
+ // SamplesManagedLabel is the key for a label added to all the imagestreams and templates
+ // in the openshift namespace that the Config is managing. This label is adjusted
+ // when changes to the SkippedImagestreams and SkippedTemplates fields are made.
+ SamplesManagedLabel = GroupName + "/managed"
+ // SamplesVersionAnnotation is the key for an annotation set on the imagestreams, templates,
+ // and secret that this operator manages that signifies the version of the operator that
+ // last managed the particular resource.
+ SamplesVersionAnnotation = GroupName + "/version"
+ // SamplesRecreateCredentialAnnotation is the key for an annotation set on the secret used
+ // for authentication when configuration moves from Removed to Managed but the associated secret
+ // in the openshift namespace does not exist. This will initiate creation of the credential
+ // in the openshift namespace.
+ SamplesRecreateCredentialAnnotation = GroupName + "/recreate"
+ // OperatorNamespace is the namespace the operator runs in.
+ OperatorNamespace = "openshift-cluster-samples-operator"
+)
+
+type ConfigConditionType string
+
+// the valid conditions of the Config
+
+const (
+ // ImportCredentialsExist represents the state of any credentials specified by
+ // the SamplesRegistry field in the Spec.
+ ImportCredentialsExist ConfigConditionType = "ImportCredentialsExist"
+ // SamplesExist represents whether an incoming Config has been successfully
+ // processed or not at all, or whether the last Config to come in has been
+ // successfully processed.
+ SamplesExist ConfigConditionType = "SamplesExist"
+ // ConfigurationValid represents whether the latest Config to come in
+ // tried to make a supported configuration change. Currently, changes to the
+ // InstallType and Architectures list after initial processing are not allowed.
+ ConfigurationValid ConfigConditionType = "ConfigurationValid"
+ // ImageChangesInProgress represents the state between where the samples operator has
+ // started updating the imagestreams and when the spec and status generations for each
+ // tag match. The list of imagestreams that are still in progress will be stored
+ // in the Reason field of the condition. The Reason field being empty corresponds
+ // with this condition being marked true.
+ ImageChangesInProgress ConfigConditionType = "ImageChangesInProgress"
+ // RemovePending represents whether the Config Spec ManagementState
+ // has been set to Removed, but we have not completed the deletion of the
+ // samples, pull secret, etc. and set the Config Spec ManagementState to Removed.
+ // Also note, while a samples creation/update cycle is still in progress, and ImageChangesInProgress
+ // is True, the operator will not initiate the deletions, as we
+ // do not want the create/updates and deletes of the samples to be occurring in parallel.
+ // So the actual Removed processing will be initiated only after ImageChangesInProgress is set
+ // to false. Once the deletions are done, and the Status ManagementState is Removed, this
+ // condition is set back to False. Lastly, when this condition is set to True, the
+ // ClusterOperator Progressing condition will be set to True.
+ RemovePending ConfigConditionType = "RemovePending"
+ // MigrationInProgress represents the special case where the operator is running off of
+ // a new version of its image, and samples are deployed of a previous version. This condition
+ // facilitates the maintenance of this operator's ClusterOperator object.
+ MigrationInProgress ConfigConditionType = "MigrationInProgress"
+ // ImportImageErrorsExist registers any image import failures, separate from ImageChangeInProgress,
+ // so that we can a) indicate a problem to the ClusterOperator status, b) mark the current
+ // change cycle as complete in both ClusterOperator and Config; retry on import will
+ // occur by the next relist interval if it was an intermittent issue;
+ ImportImageErrorsExist ConfigConditionType = "ImportImageErrorsExist"
+)
+
+// ConfigCondition captures various conditions of the Config
+// as entries are processed.
+type ConfigCondition struct {
+ // type of condition.
+ Type ConfigConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConfigConditionType"`
+ // status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+ // lastUpdateTime is the last time this condition was updated.
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"`
+ // lastTransitionTime is the last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // reason is what caused the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // message is a human readable message indicating details about the transition.
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..30979c0486
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
@@ -0,0 +1,163 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigCondition) DeepCopyInto(out *ConfigCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigCondition.
+func (in *ConfigCondition) DeepCopy() *ConfigCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Config, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) {
+ *out = *in
+ if in.Architectures != nil {
+ in, out := &in.Architectures, &out.Architectures
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkippedImagestreams != nil {
+ in, out := &in.SkippedImagestreams, &out.SkippedImagestreams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkippedTemplates != nil {
+ in, out := &in.SkippedTemplates, &out.SkippedTemplates
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkippedHelmCharts != nil {
+ in, out := &in.SkippedHelmCharts, &out.SkippedHelmCharts
+ *out = make([]HelmChartName, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec.
+func (in *ConfigSpec) DeepCopy() *ConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]ConfigCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Architectures != nil {
+ in, out := &in.Architectures, &out.Architectures
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkippedImagestreams != nil {
+ in, out := &in.SkippedImagestreams, &out.SkippedImagestreams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SkippedTemplates != nil {
+ in, out := &in.SkippedTemplates, &out.SkippedTemplates
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus.
+func (in *ConfigStatus) DeepCopy() *ConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/samples/v1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..87bf27b519
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,23 @@
+configs.samples.operator.openshift.io:
+ Annotations:
+ description: Extension for configuring openshift samples operator.
+ displayName: ConfigsSamples
+ ApprovedPRNumber: https://github.com/openshift/api/pull/513
+ CRDName: configs.samples.operator.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: "00"
+ FilenameRunLevel: ""
+ GroupName: samples.operator.openshift.io
+ HasStatus: true
+ KindName: Config
+ Labels: {}
+ PluralName: configs
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1
+
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..b82e704d87
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,75 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Config = map[string]string{
+ "": "Config contains the configuration and detailed condition status for the Samples Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+ return map_Config
+}
+
+var map_ConfigCondition = map[string]string{
+ "": "ConfigCondition captures various conditions of the Config as entries are processed.",
+ "type": "type of condition.",
+ "status": "status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "lastUpdateTime is the last time this condition was updated.",
+ "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another.",
+ "reason": "reason is what caused the condition's last transition.",
+ "message": "message is a human readable message indicating details about the transition.",
+}
+
+func (ConfigCondition) SwaggerDoc() map[string]string {
+ return map_ConfigCondition
+}
+
+var map_ConfigList = map[string]string{
+ "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+ return map_ConfigList
+}
+
+var map_ConfigSpec = map[string]string{
+ "": "ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace.",
+ "managementState": "managementState is top level on/off type of switch for all operators. When \"Managed\", this operator processes config and manipulates the samples accordingly. When \"Unmanaged\", this operator ignores any updates to the resources it watches. When \"Removed\", it reacts that same wasy as it does if the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples,",
+ "samplesRegistry": "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io.",
+ "architectures": "architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently.",
+ "skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here.",
+ "skippedTemplates": "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here.",
+ "skippedHelmCharts": "skippedHelmCharts specifies names of helm charts that should NOT be managed. Admins can use this to allow them to delete content they don’t want. They will still have to MANUALLY DELETE the content but the operator will not recreate(or update) anything listed here. Few examples of the name of helmcharts which can be skipped are 'redhat-redhat-perl-imagestreams','redhat-redhat-nodejs-imagestreams','redhat-nginx-imagestreams', 'redhat-redhat-ruby-imagestreams','redhat-redhat-python-imagestreams','redhat-redhat-php-imagestreams', 'redhat-httpd-imagestreams','redhat-redhat-dotnet-imagestreams'. Rest of the names can be obtained from openshift console --> helmcharts -->installed helmcharts. This will display the list of all the 12 helmcharts(of imagestreams)being installed by Samples Operator. The skippedHelmCharts must be a valid Kubernetes resource name. May contain only lowercase alphanumeric characters, hyphens and periods, and each period separated segment must begin and end with an alphanumeric character. It must be non-empty and at most 253 characters in length",
+}
+
+func (ConfigSpec) SwaggerDoc() map[string]string {
+ return map_ConfigSpec
+}
+
+var map_ConfigStatus = map[string]string{
+ "": "ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator.",
+ "managementState": "managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. \"Managed\") when it was previously \"Unmanaged\".",
+ "conditions": "conditions represents the available maintenance status of the sample imagestreams and templates.",
+ "samplesRegistry": "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io.",
+ "architectures": "architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices.",
+ "skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here.",
+ "skippedTemplates": "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here.",
+ "version": "version is the value of the operator's payload based version indicator when it was last successfully processed",
+}
+
+func (ConfigStatus) SwaggerDoc() map[string]string {
+ return map_ConfigStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/security/install.go b/vendor/github.com/openshift/api/security/install.go
new file mode 100644
index 0000000000..c2b04c4329
--- /dev/null
+++ b/vendor/github.com/openshift/api/security/install.go
@@ -0,0 +1,26 @@
+package security
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ securityv1 "github.com/openshift/api/security/v1"
+)
+
+const (
+ GroupName = "security.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(securityv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml b/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/servicecertsigner/install.go b/vendor/github.com/openshift/api/servicecertsigner/install.go
new file mode 100644
index 0000000000..98d891d34d
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/install.go
@@ -0,0 +1,26 @@
+package servicecertsigner
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ servicecertsignerv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1"
+)
+
+const (
+ GroupName = "servicecertsigner.config.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(servicecertsignerv1alpha1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go
new file mode 100644
index 0000000000..6ce02bdb3e
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go
@@ -0,0 +1,6 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=servicecertsigner.config.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go
new file mode 100644
index 0000000000..19ef421b22
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go
@@ -0,0 +1,40 @@
+package v1alpha1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ operatorsv1alpha1api "github.com/openshift/api/operator/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "servicecertsigner.config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install, operatorsv1alpha1api.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &ServiceCertSignerOperatorConfig{},
+ &ServiceCertSignerOperatorConfigList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go
new file mode 100644
index 0000000000..ebd8d75efc
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go
@@ -0,0 +1,53 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type ServiceCertSignerOperatorConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec ServiceCertSignerOperatorConfigSpec `json:"spec"`
+ Status ServiceCertSignerOperatorConfigStatus `json:"status"`
+}
+
+type ServiceCertSignerOperatorConfigSpec struct {
+ operatorv1.OperatorSpec `json:",inline"`
+}
+
+type ServiceCertSignerOperatorConfigStatus struct {
+ operatorv1.OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCertSignerOperatorConfigList is a collection of items
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type ServiceCertSignerOperatorConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items contains the items
+ Items []ServiceCertSignerOperatorConfig `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..2661c23aa5
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,105 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCertSignerOperatorConfig) DeepCopyInto(out *ServiceCertSignerOperatorConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfig.
+func (in *ServiceCertSignerOperatorConfig) DeepCopy() *ServiceCertSignerOperatorConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCertSignerOperatorConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCertSignerOperatorConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCertSignerOperatorConfigList) DeepCopyInto(out *ServiceCertSignerOperatorConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ServiceCertSignerOperatorConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigList.
+func (in *ServiceCertSignerOperatorConfigList) DeepCopy() *ServiceCertSignerOperatorConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCertSignerOperatorConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceCertSignerOperatorConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCertSignerOperatorConfigSpec) DeepCopyInto(out *ServiceCertSignerOperatorConfigSpec) {
+ *out = *in
+ in.OperatorSpec.DeepCopyInto(&out.OperatorSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigSpec.
+func (in *ServiceCertSignerOperatorConfigSpec) DeepCopy() *ServiceCertSignerOperatorConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCertSignerOperatorConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceCertSignerOperatorConfigStatus) DeepCopyInto(out *ServiceCertSignerOperatorConfigStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigStatus.
+func (in *ServiceCertSignerOperatorConfigStatus) DeepCopy() *ServiceCertSignerOperatorConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceCertSignerOperatorConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..13b3b73644
--- /dev/null
+++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,33 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ServiceCertSignerOperatorConfig = map[string]string{
+ "": "ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ServiceCertSignerOperatorConfig) SwaggerDoc() map[string]string {
+ return map_ServiceCertSignerOperatorConfig
+}
+
+var map_ServiceCertSignerOperatorConfigList = map[string]string{
+ "": "ServiceCertSignerOperatorConfigList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items contains the items",
+}
+
+func (ServiceCertSignerOperatorConfigList) SwaggerDoc() map[string]string {
+ return map_ServiceCertSignerOperatorConfigList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/sharedresource/.codegen.yaml b/vendor/github.com/openshift/api/sharedresource/.codegen.yaml
new file mode 100644
index 0000000000..ffa2c8d9b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/.codegen.yaml
@@ -0,0 +1,2 @@
+swaggerdocs:
+ commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/sharedresource/OWNERS b/vendor/github.com/openshift/api/sharedresource/OWNERS
new file mode 100644
index 0000000000..c89bc9387f
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+ - bparees
+ - gabemontero
+ - adambkaplan
+ - coreydaley
diff --git a/vendor/github.com/openshift/api/sharedresource/install.go b/vendor/github.com/openshift/api/sharedresource/install.go
new file mode 100644
index 0000000000..40eae94a98
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/install.go
@@ -0,0 +1,26 @@
+package sharedresource
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ v1alpha1 "github.com/openshift/api/sharedresource/v1alpha1"
+)
+
+const (
+ GroupName = "sharedresource.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile b/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile
new file mode 100644
index 0000000000..330157e5b3
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile
@@ -0,0 +1,3 @@
+.PHONY: test
+test:
+ make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="sharedresource.openshift.io/v1alpha1"
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go
new file mode 100644
index 0000000000..833dd7f12c
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=sharedresource.openshift.io
+// Package v1alpha1 is the v1alpha1 version of the API.
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go
new file mode 100644
index 0000000000..c390b46fc0
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go
@@ -0,0 +1,53 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+ Version = "v1alpha1"
+ GroupName = "sharedresource.openshift.io"
+)
+
+var (
+ scheme = runtime.NewScheme()
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = SchemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+func init() {
+ AddToScheme(scheme)
+}
+
+// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &SharedConfigMap{},
+ &SharedConfigMapList{},
+ &SharedSecret{},
+ &SharedSecretList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go
new file mode 100644
index 0000000000..1eea47d02d
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go
@@ -0,0 +1,98 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SharedConfigMap allows a ConfigMap to be shared across namespaces.
+// Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the
+// "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedConfigMap in the volume attributes:
+//
+// spec:
+// volumes:
+// - name: shared-configmap
+// csi:
+// driver: csi.sharedresource.openshift.io
+// volumeAttributes:
+// sharedConfigMap: my-share
+//
+// For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object
+// within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating
+// such Role and RoleBinding objects.
+//
+// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`
+// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`
+//
+// Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// These capabilities should not be used by applications needing long term support.
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=sharedconfigmaps,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/979
+// +kubebuilder:metadata:annotations="description=Extension for sharing ConfigMaps across Namespaces"
+// +kubebuilder:metadata:annotations="displayName=SharedConfigMap"
+// +k8s:openapi-gen=true
+// +openshift:compatibility-gen:level=4
+type SharedConfigMap struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired shared configmap
+ // +kubebuilder:validation:Required
+ Spec SharedConfigMapSpec `json:"spec,omitempty"`
+
+ // status is the observed status of the shared configmap
+ Status SharedConfigMapStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SharedConfigMapList contains a list of SharedConfigMap objects.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type SharedConfigMapList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []SharedConfigMap `json:"items"`
+}
+
+// SharedConfigMapReference contains information about which ConfigMap to share
+type SharedConfigMapReference struct {
+ // name represents the name of the ConfigMap that is being referenced.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+ // namespace represents the namespace where the referenced ConfigMap is located.
+ // +kubebuilder:validation:Required
+ Namespace string `json:"namespace"`
+}
+
+// SharedConfigMapSpec defines the desired state of a SharedConfigMap
+// +k8s:openapi-gen=true
+type SharedConfigMapSpec struct {
+	// configMapRef is a reference to the ConfigMap to share
+ // +kubebuilder:validation:Required
+ ConfigMapRef SharedConfigMapReference `json:"configMapRef"`
+ // description is a user readable explanation of what the backing resource provides.
+ Description string `json:"description,omitempty"`
+}
+
+// SharedConfigMapStatus contains the observed status of the shared resource
+type SharedConfigMapStatus struct {
+ // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go
new file mode 100644
index 0000000000..654106bce6
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go
@@ -0,0 +1,97 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SharedSecret allows a Secret to be shared across namespaces.
+// Pods can mount the shared Secret by adding a CSI volume to the pod specification using the
+// "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedSecret in the volume attributes:
+//
+// spec:
+// volumes:
+// - name: shared-secret
+// csi:
+// driver: csi.sharedresource.openshift.io
+// volumeAttributes:
+// sharedSecret: my-share
+//
+// For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object
+// within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating
+// such Role and RoleBinding objects.
+//
+// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`
+// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`
+//
+// Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=sharedsecrets,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/979
+// +kubebuilder:metadata:annotations="description=Extension for sharing Secrets across Namespaces"
+// +kubebuilder:metadata:annotations="displayName=SharedSecret"
+type SharedSecret struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec is the specification of the desired shared secret
+ // +kubebuilder:validation:Required
+ Spec SharedSecretSpec `json:"spec,omitempty"`
+
+ // status is the observed status of the shared secret
+ Status SharedSecretStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SharedSecretList contains a list of SharedSecret objects.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+type SharedSecretList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []SharedSecret `json:"items"`
+}
+
+// SharedSecretReference contains information about which Secret to share
+type SharedSecretReference struct {
+ // name represents the name of the Secret that is being referenced.
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+ // namespace represents the namespace where the referenced Secret is located.
+ // +kubebuilder:validation:Required
+ Namespace string `json:"namespace"`
+}
+
+// SharedSecretSpec defines the desired state of a SharedSecret
+// +k8s:openapi-gen=true
+type SharedSecretSpec struct {
+ // secretRef is a reference to the Secret to share
+ // +kubebuilder:validation:Required
+ SecretRef SharedSecretReference `json:"secretRef"`
+ // description is a user readable explanation of what the backing resource provides.
+ Description string `json:"description,omitempty"`
+}
+
+// SharedSecretStatus contains the observed status of the shared resource
+type SharedSecretStatus struct {
+ // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..25ecd38363
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,245 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedConfigMap) DeepCopyInto(out *SharedConfigMap) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMap.
+func (in *SharedConfigMap) DeepCopy() *SharedConfigMap {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedConfigMap)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SharedConfigMap) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedConfigMapList) DeepCopyInto(out *SharedConfigMapList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SharedConfigMap, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapList.
+func (in *SharedConfigMapList) DeepCopy() *SharedConfigMapList {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedConfigMapList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SharedConfigMapList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedConfigMapReference) DeepCopyInto(out *SharedConfigMapReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapReference.
+func (in *SharedConfigMapReference) DeepCopy() *SharedConfigMapReference {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedConfigMapReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedConfigMapSpec) DeepCopyInto(out *SharedConfigMapSpec) {
+ *out = *in
+ out.ConfigMapRef = in.ConfigMapRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapSpec.
+func (in *SharedConfigMapSpec) DeepCopy() *SharedConfigMapSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedConfigMapSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedConfigMapStatus) DeepCopyInto(out *SharedConfigMapStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapStatus.
+func (in *SharedConfigMapStatus) DeepCopy() *SharedConfigMapStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedConfigMapStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedSecret) DeepCopyInto(out *SharedSecret) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecret.
+func (in *SharedSecret) DeepCopy() *SharedSecret {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedSecret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SharedSecret) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedSecretList) DeepCopyInto(out *SharedSecretList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SharedSecret, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretList.
+func (in *SharedSecretList) DeepCopy() *SharedSecretList {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedSecretList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SharedSecretList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedSecretReference) DeepCopyInto(out *SharedSecretReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretReference.
+func (in *SharedSecretReference) DeepCopy() *SharedSecretReference {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedSecretReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedSecretSpec) DeepCopyInto(out *SharedSecretSpec) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretSpec.
+func (in *SharedSecretSpec) DeepCopy() *SharedSecretSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedSecretSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SharedSecretStatus) DeepCopyInto(out *SharedSecretStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretStatus.
+func (in *SharedSecretStatus) DeepCopy() *SharedSecretStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SharedSecretStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
new file mode 100644
index 0000000000..874f1831e8
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.featuregated-crd-manifests.yaml
@@ -0,0 +1,46 @@
+sharedconfigmaps.sharedresource.openshift.io:
+ Annotations:
+ description: Extension for sharing ConfigMaps across Namespaces
+ displayName: SharedConfigMap
+ ApprovedPRNumber: https://github.com/openshift/api/pull/979
+ CRDName: sharedconfigmaps.sharedresource.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: ""
+ FilenameRunLevel: ""
+ GroupName: sharedresource.openshift.io
+ HasStatus: true
+ KindName: SharedConfigMap
+ Labels: {}
+ PluralName: sharedconfigmaps
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1alpha1
+
+sharedsecrets.sharedresource.openshift.io:
+ Annotations:
+ description: Extension for sharing Secrets across Namespaces
+ displayName: SharedSecret
+ ApprovedPRNumber: https://github.com/openshift/api/pull/979
+ CRDName: sharedsecrets.sharedresource.openshift.io
+ Capability: ""
+ Category: ""
+ FeatureGates: []
+ FilenameOperatorName: ""
+ FilenameOperatorOrdering: ""
+ FilenameRunLevel: ""
+ GroupName: sharedresource.openshift.io
+ HasStatus: true
+ KindName: SharedSecret
+ Labels: {}
+ PluralName: sharedsecrets
+ PrinterColumns: []
+ Scope: Cluster
+ ShortNames: null
+ TopLevelFeatureGates: []
+ Version: v1alpha1
+
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..f432d63f77
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,112 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_SharedConfigMap = map[string]string{
+ "": "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n volumes:\n - name: shared-configmap\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired shared configmap",
+ "status": "status is the observed status of the shared configmap",
+}
+
+func (SharedConfigMap) SwaggerDoc() map[string]string {
+ return map_SharedConfigMap
+}
+
+var map_SharedConfigMapList = map[string]string{
+ "": "SharedConfigMapList contains a list of SharedConfigMap objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (SharedConfigMapList) SwaggerDoc() map[string]string {
+ return map_SharedConfigMapList
+}
+
+var map_SharedConfigMapReference = map[string]string{
+ "": "SharedConfigMapReference contains information about which ConfigMap to share",
+ "name": "name represents the name of the ConfigMap that is being referenced.",
+ "namespace": "namespace represents the namespace where the referenced ConfigMap is located.",
+}
+
+func (SharedConfigMapReference) SwaggerDoc() map[string]string {
+ return map_SharedConfigMapReference
+}
+
+var map_SharedConfigMapSpec = map[string]string{
+ "": "SharedConfigMapSpec defines the desired state of a SharedConfigMap",
+ "configMapRef": "configMapRef is a reference to the ConfigMap to share",
+ "description": "description is a user readable explanation of what the backing resource provides.",
+}
+
+func (SharedConfigMapSpec) SwaggerDoc() map[string]string {
+ return map_SharedConfigMapSpec
+}
+
+var map_SharedConfigMapStatus = map[string]string{
+ "": "SharedSecretStatus contains the observed status of the shared resource",
+ "conditions": "conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.",
+}
+
+func (SharedConfigMapStatus) SwaggerDoc() map[string]string {
+ return map_SharedConfigMapStatus
+}
+
+var map_SharedSecret = map[string]string{
+ "": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n volumes:\n - name: shared-secret\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec is the specification of the desired shared secret",
+ "status": "status is the observed status of the shared secret",
+}
+
+func (SharedSecret) SwaggerDoc() map[string]string {
+ return map_SharedSecret
+}
+
+var map_SharedSecretList = map[string]string{
+ "": "SharedSecretList contains a list of SharedSecret objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (SharedSecretList) SwaggerDoc() map[string]string {
+ return map_SharedSecretList
+}
+
+var map_SharedSecretReference = map[string]string{
+ "": "SharedSecretReference contains information about which Secret to share",
+ "name": "name represents the name of the Secret that is being referenced.",
+ "namespace": "namespace represents the namespace where the referenced Secret is located.",
+}
+
+func (SharedSecretReference) SwaggerDoc() map[string]string {
+ return map_SharedSecretReference
+}
+
+var map_SharedSecretSpec = map[string]string{
+ "": "SharedSecretSpec defines the desired state of a SharedSecret",
+ "secretRef": "secretRef is a reference to the Secret to share",
+ "description": "description is a user readable explanation of what the backing resource provides.",
+}
+
+func (SharedSecretSpec) SwaggerDoc() map[string]string {
+ return map_SharedSecretSpec
+}
+
+var map_SharedSecretStatus = map[string]string{
+ "": "SharedSecretStatus contains the observed status of the shared resource",
+ "conditions": "conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.",
+}
+
+func (SharedSecretStatus) SwaggerDoc() map[string]string {
+ return map_SharedSecretStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/template/OWNERS b/vendor/github.com/openshift/api/template/OWNERS
new file mode 100644
index 0000000000..c1ece8b213
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+ - bparees
+ - gabemontero
+ - jim-minter
diff --git a/vendor/github.com/openshift/api/template/install.go b/vendor/github.com/openshift/api/template/install.go
new file mode 100644
index 0000000000..8a69398dd0
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/install.go
@@ -0,0 +1,26 @@
+package template
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ templatev1 "github.com/openshift/api/template/v1"
+)
+
+const (
+ GroupName = "template.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(templatev1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/template/v1/codec.go b/vendor/github.com/openshift/api/template/v1/codec.go
new file mode 100644
index 0000000000..9e9177ed6a
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/codec.go
@@ -0,0 +1,33 @@
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/openshift/api/pkg/serialization"
+)
+
+var _ runtime.NestedObjectDecoder = &Template{}
+var _ runtime.NestedObjectEncoder = &Template{}
+
+// DecodeNestedObjects decodes the object as a runtime.Unknown with JSON content.
+func (c *Template) DecodeNestedObjects(d runtime.Decoder) error {
+ for i := range c.Objects {
+ if c.Objects[i].Object != nil {
+ continue
+ }
+ c.Objects[i].Object = &runtime.Unknown{
+ ContentType: "application/json",
+ Raw: c.Objects[i].Raw,
+ }
+ }
+ return nil
+}
+func (c *Template) EncodeNestedObjects(e runtime.Encoder) error {
+ for i := range c.Objects {
+ if err := serialization.EncodeNestedRawExtension(unstructured.UnstructuredJSONScheme, &c.Objects[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/template/v1/consts.go b/vendor/github.com/openshift/api/template/v1/consts.go
new file mode 100644
index 0000000000..cc8b49d55f
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/consts.go
@@ -0,0 +1,16 @@
+package v1
+
+const (
+ // TemplateInstanceFinalizer is used to clean up the objects created by the template instance,
+ // when the template instance is deleted.
+ TemplateInstanceFinalizer = "template.openshift.io/finalizer"
+
+ // TemplateInstanceOwner is a label applied to all objects created from a template instance
+ // which contains the uid of the template instance.
+ TemplateInstanceOwner = "template.openshift.io/template-instance-owner"
+
+ // WaitForReadyAnnotation indicates that the TemplateInstance controller
+ // should wait for the object to be ready before reporting the template
+ // instantiation complete.
+ WaitForReadyAnnotation = "template.alpha.openshift.io/wait-for-ready"
+)
diff --git a/vendor/github.com/openshift/api/template/v1/doc.go b/vendor/github.com/openshift/api/template/v1/doc.go
new file mode 100644
index 0000000000..34f9f8d455
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/template/apis/template
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=template.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/template/v1/generated.pb.go b/vendor/github.com/openshift/api/template/v1/generated.pb.go
new file mode 100644
index 0000000000..df724d89d4
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/generated.pb.go
@@ -0,0 +1,4115 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/template/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v11 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *BrokerTemplateInstance) Reset() { *m = BrokerTemplateInstance{} }
+func (*BrokerTemplateInstance) ProtoMessage() {}
+func (*BrokerTemplateInstance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{0}
+}
+func (m *BrokerTemplateInstance) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BrokerTemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BrokerTemplateInstance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BrokerTemplateInstance.Merge(m, src)
+}
+func (m *BrokerTemplateInstance) XXX_Size() int {
+ return m.Size()
+}
+func (m *BrokerTemplateInstance) XXX_DiscardUnknown() {
+ xxx_messageInfo_BrokerTemplateInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BrokerTemplateInstance proto.InternalMessageInfo
+
+func (m *BrokerTemplateInstanceList) Reset() { *m = BrokerTemplateInstanceList{} }
+func (*BrokerTemplateInstanceList) ProtoMessage() {}
+func (*BrokerTemplateInstanceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{1}
+}
+func (m *BrokerTemplateInstanceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BrokerTemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BrokerTemplateInstanceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BrokerTemplateInstanceList.Merge(m, src)
+}
+func (m *BrokerTemplateInstanceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *BrokerTemplateInstanceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_BrokerTemplateInstanceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BrokerTemplateInstanceList proto.InternalMessageInfo
+
+func (m *BrokerTemplateInstanceSpec) Reset() { *m = BrokerTemplateInstanceSpec{} }
+func (*BrokerTemplateInstanceSpec) ProtoMessage() {}
+func (*BrokerTemplateInstanceSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{2}
+}
+func (m *BrokerTemplateInstanceSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BrokerTemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *BrokerTemplateInstanceSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BrokerTemplateInstanceSpec.Merge(m, src)
+}
+func (m *BrokerTemplateInstanceSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *BrokerTemplateInstanceSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_BrokerTemplateInstanceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BrokerTemplateInstanceSpec proto.InternalMessageInfo
+
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
+func (*ExtraValue) ProtoMessage() {}
+func (*ExtraValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{3}
+}
+func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExtraValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExtraValue.Merge(m, src)
+}
+func (m *ExtraValue) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExtraValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExtraValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
+
+func (m *Parameter) Reset() { *m = Parameter{} }
+func (*Parameter) ProtoMessage() {}
+func (*Parameter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{4}
+}
+func (m *Parameter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Parameter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Parameter.Merge(m, src)
+}
+func (m *Parameter) XXX_Size() int {
+ return m.Size()
+}
+func (m *Parameter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Parameter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Parameter proto.InternalMessageInfo
+
+func (m *Template) Reset() { *m = Template{} }
+func (*Template) ProtoMessage() {}
+func (*Template) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{5}
+}
+func (m *Template) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Template) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Template.Merge(m, src)
+}
+func (m *Template) XXX_Size() int {
+ return m.Size()
+}
+func (m *Template) XXX_DiscardUnknown() {
+ xxx_messageInfo_Template.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Template proto.InternalMessageInfo
+
+func (m *TemplateInstance) Reset() { *m = TemplateInstance{} }
+func (*TemplateInstance) ProtoMessage() {}
+func (*TemplateInstance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{6}
+}
+func (m *TemplateInstance) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstance.Merge(m, src)
+}
+func (m *TemplateInstance) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstance) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstance proto.InternalMessageInfo
+
+func (m *TemplateInstanceCondition) Reset() { *m = TemplateInstanceCondition{} }
+func (*TemplateInstanceCondition) ProtoMessage() {}
+func (*TemplateInstanceCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{7}
+}
+func (m *TemplateInstanceCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceCondition.Merge(m, src)
+}
+func (m *TemplateInstanceCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceCondition proto.InternalMessageInfo
+
+func (m *TemplateInstanceList) Reset() { *m = TemplateInstanceList{} }
+func (*TemplateInstanceList) ProtoMessage() {}
+func (*TemplateInstanceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{8}
+}
+func (m *TemplateInstanceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceList.Merge(m, src)
+}
+func (m *TemplateInstanceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceList proto.InternalMessageInfo
+
+func (m *TemplateInstanceObject) Reset() { *m = TemplateInstanceObject{} }
+func (*TemplateInstanceObject) ProtoMessage() {}
+func (*TemplateInstanceObject) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{9}
+}
+func (m *TemplateInstanceObject) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceObject) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceObject.Merge(m, src)
+}
+func (m *TemplateInstanceObject) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceObject) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceObject.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceObject proto.InternalMessageInfo
+
+func (m *TemplateInstanceRequester) Reset() { *m = TemplateInstanceRequester{} }
+func (*TemplateInstanceRequester) ProtoMessage() {}
+func (*TemplateInstanceRequester) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{10}
+}
+func (m *TemplateInstanceRequester) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceRequester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceRequester) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceRequester.Merge(m, src)
+}
+func (m *TemplateInstanceRequester) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceRequester) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceRequester.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceRequester proto.InternalMessageInfo
+
+func (m *TemplateInstanceSpec) Reset() { *m = TemplateInstanceSpec{} }
+func (*TemplateInstanceSpec) ProtoMessage() {}
+func (*TemplateInstanceSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{11}
+}
+func (m *TemplateInstanceSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceSpec.Merge(m, src)
+}
+func (m *TemplateInstanceSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceSpec proto.InternalMessageInfo
+
+func (m *TemplateInstanceStatus) Reset() { *m = TemplateInstanceStatus{} }
+func (*TemplateInstanceStatus) ProtoMessage() {}
+func (*TemplateInstanceStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{12}
+}
+func (m *TemplateInstanceStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateInstanceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateInstanceStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateInstanceStatus.Merge(m, src)
+}
+func (m *TemplateInstanceStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateInstanceStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateInstanceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateInstanceStatus proto.InternalMessageInfo
+
+func (m *TemplateList) Reset() { *m = TemplateList{} }
+func (*TemplateList) ProtoMessage() {}
+func (*TemplateList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8d3ee9f55fa8363e, []int{13}
+}
+func (m *TemplateList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TemplateList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TemplateList.Merge(m, src)
+}
+func (m *TemplateList) XXX_Size() int {
+ return m.Size()
+}
+func (m *TemplateList) XXX_DiscardUnknown() {
+ xxx_messageInfo_TemplateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TemplateList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*BrokerTemplateInstance)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstance")
+ proto.RegisterType((*BrokerTemplateInstanceList)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceList")
+ proto.RegisterType((*BrokerTemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceSpec")
+ proto.RegisterType((*ExtraValue)(nil), "github.com.openshift.api.template.v1.ExtraValue")
+ proto.RegisterType((*Parameter)(nil), "github.com.openshift.api.template.v1.Parameter")
+ proto.RegisterType((*Template)(nil), "github.com.openshift.api.template.v1.Template")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.template.v1.Template.LabelsEntry")
+ proto.RegisterType((*TemplateInstance)(nil), "github.com.openshift.api.template.v1.TemplateInstance")
+ proto.RegisterType((*TemplateInstanceCondition)(nil), "github.com.openshift.api.template.v1.TemplateInstanceCondition")
+ proto.RegisterType((*TemplateInstanceList)(nil), "github.com.openshift.api.template.v1.TemplateInstanceList")
+ proto.RegisterType((*TemplateInstanceObject)(nil), "github.com.openshift.api.template.v1.TemplateInstanceObject")
+ proto.RegisterType((*TemplateInstanceRequester)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester")
+ proto.RegisterMapType((map[string]ExtraValue)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester.ExtraEntry")
+ proto.RegisterType((*TemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.TemplateInstanceSpec")
+ proto.RegisterType((*TemplateInstanceStatus)(nil), "github.com.openshift.api.template.v1.TemplateInstanceStatus")
+ proto.RegisterType((*TemplateList)(nil), "github.com.openshift.api.template.v1.TemplateList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/template/v1/generated.proto", fileDescriptor_8d3ee9f55fa8363e)
+}
+
+var fileDescriptor_8d3ee9f55fa8363e = []byte{
+ // 1246 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x5b, 0x45,
+ 0x17, 0xf6, 0xf5, 0x57, 0xec, 0x71, 0xdb, 0x37, 0x9a, 0xb7, 0xaa, 0x2e, 0x96, 0x6a, 0x5b, 0xb7,
+ 0x15, 0x0a, 0xa8, 0xb9, 0x26, 0x51, 0x28, 0x25, 0x42, 0x02, 0x2e, 0x49, 0xab, 0x94, 0x14, 0xd0,
+ 0x24, 0x45, 0x08, 0xb2, 0x60, 0x7c, 0x3d, 0x76, 0x6e, 0xe3, 0xfb, 0xc1, 0xcc, 0x38, 0xd4, 0xbb,
+ 0x2e, 0xf8, 0x01, 0x2c, 0x59, 0xf2, 0x13, 0x58, 0xb2, 0x42, 0x62, 0x97, 0x65, 0xd9, 0x75, 0x01,
+ 0x16, 0x31, 0x2b, 0xfe, 0x00, 0x48, 0x65, 0x83, 0x66, 0xee, 0xdc, 0x0f, 0x7f, 0x51, 0x27, 0x95,
+ 0xda, 0x9d, 0xef, 0x99, 0xf3, 0x3c, 0x67, 0xce, 0x99, 0x33, 0xcf, 0x1c, 0x83, 0x8d, 0xae, 0xc3,
+ 0x0f, 0xfb, 0x2d, 0xd3, 0xf6, 0xdd, 0xa6, 0x1f, 0x10, 0x8f, 0x1d, 0x3a, 0x1d, 0xde, 0xc4, 0x81,
+ 0xd3, 0xe4, 0xc4, 0x0d, 0x7a, 0x98, 0x93, 0xe6, 0xf1, 0x5a, 0xb3, 0x4b, 0x3c, 0x42, 0x31, 0x27,
+ 0x6d, 0x33, 0xa0, 0x3e, 0xf7, 0xe1, 0xf5, 0x04, 0x65, 0xc6, 0x28, 0x13, 0x07, 0x8e, 0x19, 0xa1,
+ 0xcc, 0xe3, 0xb5, 0xea, 0x6a, 0x8a, 0xbb, 0xeb, 0x77, 0xfd, 0xa6, 0x04, 0xb7, 0xfa, 0x1d, 0xf9,
+ 0x25, 0x3f, 0xe4, 0xaf, 0x90, 0xb4, 0x6a, 0x1c, 0xdd, 0x62, 0xa6, 0xe3, 0xcb, 0xe0, 0xb6, 0x4f,
+ 0x67, 0x05, 0xae, 0x6e, 0x24, 0x3e, 0x2e, 0xb6, 0x0f, 0x1d, 0x8f, 0xd0, 0x41, 0x33, 0x38, 0xea,
+ 0x0a, 0x03, 0x6b, 0xba, 0x84, 0xe3, 0x59, 0xa8, 0xe6, 0x3c, 0x14, 0xed, 0x7b, 0xdc, 0x71, 0xc9,
+ 0x14, 0xe0, 0xe6, 0xb3, 0x00, 0xcc, 0x3e, 0x24, 0x2e, 0x9e, 0xc4, 0x19, 0x43, 0x0d, 0x5c, 0xb1,
+ 0xa8, 0x7f, 0x44, 0xe8, 0xbe, 0xaa, 0xc3, 0x8e, 0xc7, 0x38, 0xf6, 0x6c, 0x02, 0xbf, 0x04, 0x25,
+ 0xb1, 0xbd, 0x36, 0xe6, 0x58, 0xd7, 0x1a, 0xda, 0x4a, 0x65, 0xfd, 0x0d, 0x33, 0x8c, 0x62, 0xa6,
+ 0xa3, 0x98, 0xc1, 0x51, 0x57, 0x18, 0x98, 0x29, 0xbc, 0xcd, 0xe3, 0x35, 0xf3, 0xe3, 0xd6, 0x03,
+ 0x62, 0xf3, 0x7b, 0x84, 0x63, 0x0b, 0x9e, 0x0c, 0xeb, 0x99, 0xd1, 0xb0, 0x0e, 0x12, 0x1b, 0x8a,
+ 0x59, 0x61, 0x0b, 0xe4, 0x59, 0x40, 0x6c, 0x3d, 0x2b, 0xd9, 0xdf, 0x33, 0x17, 0x39, 0x23, 0x73,
+ 0xf6, 0x6e, 0xf7, 0x02, 0x62, 0x5b, 0x17, 0x54, 0xb4, 0xbc, 0xf8, 0x42, 0x92, 0xdb, 0xf8, 0x4d,
+ 0x03, 0xd5, 0xd9, 0x90, 0x5d, 0x87, 0x71, 0x78, 0x30, 0x95, 0xa4, 0xb9, 0x58, 0x92, 0x02, 0x2d,
+ 0x53, 0x5c, 0x56, 0x41, 0x4b, 0x91, 0x25, 0x95, 0x20, 0x06, 0x05, 0x87, 0x13, 0x97, 0xe9, 0xd9,
+ 0x46, 0x6e, 0xa5, 0xb2, 0xfe, 0xce, 0xf3, 0x64, 0x68, 0x5d, 0x54, 0x81, 0x0a, 0x3b, 0x82, 0x12,
+ 0x85, 0xcc, 0xc6, 0x37, 0xd9, 0x79, 0xf9, 0x89, 0x22, 0x40, 0x07, 0x2c, 0xf3, 0x09, 0xbb, 0xca,
+ 0xf3, 0x5a, 0x2a, 0x4f, 0x53, 0x74, 0x6f, 0x72, 0x74, 0x88, 0x74, 0x08, 0x25, 0x22, 0xa6, 0xae,
+ 0x62, 0x2e, 0x4f, 0x92, 0xa3, 0x29, 0x5a, 0xf8, 0x21, 0x28, 0x32, 0x62, 0x53, 0xc2, 0xd5, 0x79,
+ 0x2e, 0x14, 0xe0, 0x92, 0x0a, 0x50, 0xdc, 0x93, 0x50, 0xa4, 0x28, 0xa0, 0x09, 0x40, 0xcb, 0xf1,
+ 0xda, 0x8e, 0xd7, 0xdd, 0xd9, 0x62, 0x7a, 0xae, 0x91, 0x5b, 0x29, 0x5b, 0x97, 0x44, 0x23, 0x59,
+ 0xb1, 0x15, 0xa5, 0x3c, 0x8c, 0xb7, 0x00, 0xd8, 0x7e, 0xc8, 0x29, 0xfe, 0x14, 0xf7, 0xfa, 0x04,
+ 0xd6, 0xa3, 0xba, 0x6b, 0x12, 0x58, 0x9e, 0xac, 0xda, 0x66, 0xe9, 0xbb, 0xef, 0xeb, 0x99, 0x47,
+ 0xbf, 0x36, 0x32, 0xc6, 0x4f, 0x59, 0x50, 0xfe, 0x04, 0x53, 0xec, 0x12, 0x4e, 0x28, 0x6c, 0x80,
+ 0xbc, 0x87, 0xdd, 0xb0, 0x44, 0xe5, 0xa4, 0x9f, 0x3e, 0xc2, 0x2e, 0x41, 0x72, 0x05, 0xbe, 0x09,
+ 0x2a, 0x6d, 0x87, 0x05, 0x3d, 0x3c, 0x10, 0x46, 0x99, 0x6a, 0xd9, 0xfa, 0xbf, 0x72, 0xac, 0x6c,
+ 0x25, 0x4b, 0x28, 0xed, 0x27, 0x61, 0x84, 0xd9, 0xd4, 0x09, 0xb8, 0xe3, 0x7b, 0x7a, 0x6e, 0x02,
+ 0x96, 0x2c, 0xa1, 0xb4, 0x1f, 0xbc, 0x06, 0x0a, 0xc7, 0x22, 0x23, 0x3d, 0x2f, 0x01, 0x71, 0x0b,
+ 0xc8, 0x34, 0x51, 0xb8, 0x06, 0x6f, 0x80, 0x52, 0x74, 0xad, 0xf5, 0x82, 0xf4, 0x8b, 0x7b, 0xf2,
+ 0x8e, 0xb2, 0xa3, 0xd8, 0x43, 0xa4, 0xd8, 0xa1, 0xbe, 0xab, 0x17, 0xc7, 0x53, 0xbc, 0x4d, 0x7d,
+ 0x17, 0xc9, 0x15, 0xc1, 0x47, 0xc9, 0x57, 0x7d, 0x87, 0x92, 0xb6, 0xbe, 0xd4, 0xd0, 0x56, 0x4a,
+ 0x09, 0x1f, 0x52, 0x76, 0x14, 0x7b, 0x18, 0xff, 0xe4, 0x40, 0x29, 0xea, 0x8e, 0x17, 0xa0, 0x19,
+ 0xaf, 0x81, 0x25, 0x97, 0x30, 0x86, 0xbb, 0x51, 0xed, 0xff, 0xa7, 0xdc, 0x97, 0xee, 0x85, 0x66,
+ 0x14, 0xad, 0xc3, 0xcf, 0xc0, 0x92, 0x2f, 0x29, 0xc2, 0x06, 0xaa, 0xac, 0xaf, 0xce, 0xdd, 0x8b,
+ 0x52, 0x49, 0x13, 0xe1, 0xaf, 0xb7, 0x1f, 0x72, 0xe2, 0x31, 0xc7, 0xf7, 0x12, 0xe6, 0x70, 0x23,
+ 0x0c, 0x45, 0x74, 0xd0, 0x06, 0x20, 0x88, 0x7a, 0x86, 0xe9, 0x79, 0x49, 0xde, 0x5c, 0xec, 0x72,
+ 0xc7, 0xbd, 0x96, 0xe4, 0x19, 0x9b, 0x18, 0x4a, 0xd1, 0xc2, 0x43, 0x50, 0xec, 0xe1, 0x16, 0xe9,
+ 0x31, 0xbd, 0x20, 0x03, 0x6c, 0x2e, 0x16, 0x20, 0x3a, 0x0b, 0x73, 0x57, 0x82, 0xb7, 0x3d, 0x4e,
+ 0x07, 0xd6, 0x65, 0x15, 0xeb, 0x42, 0x98, 0x4a, 0xb8, 0x84, 0x14, 0x7f, 0xf5, 0x6d, 0x50, 0x49,
+ 0x39, 0xc3, 0x65, 0x90, 0x3b, 0x22, 0x83, 0xf0, 0x0e, 0x20, 0xf1, 0x13, 0x5e, 0x8e, 0xda, 0x50,
+ 0x96, 0x5c, 0xf5, 0xdd, 0x66, 0xf6, 0x96, 0x66, 0xfc, 0x98, 0x05, 0xcb, 0x2f, 0xe1, 0xe5, 0x38,
+ 0x18, 0x7b, 0x39, 0xce, 0x58, 0x99, 0x67, 0xbd, 0x19, 0xb0, 0x0d, 0x8a, 0x8c, 0x63, 0xde, 0x67,
+ 0xf2, 0x9e, 0x2e, 0xac, 0xdb, 0x53, 0xfc, 0x92, 0x23, 0x25, 0x71, 0xf2, 0x1b, 0x29, 0x6e, 0xe3,
+ 0xef, 0x2c, 0x78, 0x65, 0x12, 0xf2, 0x81, 0xef, 0xb5, 0x1d, 0x79, 0xf3, 0xdf, 0x07, 0x79, 0x3e,
+ 0x08, 0x22, 0x25, 0x5a, 0x8d, 0x76, 0xb9, 0x3f, 0x08, 0xc8, 0xd3, 0x61, 0xfd, 0xea, 0x5c, 0xa0,
+ 0x70, 0x40, 0x12, 0x0a, 0x77, 0xe3, 0x34, 0xc2, 0x9b, 0xb2, 0x31, 0xbe, 0x91, 0xa7, 0xc3, 0xfa,
+ 0x8c, 0x01, 0xc6, 0x8c, 0x99, 0xc6, 0xb7, 0x0b, 0x8f, 0x01, 0xec, 0x61, 0xc6, 0xf7, 0x29, 0xf6,
+ 0x58, 0x18, 0xc9, 0x71, 0x89, 0x2a, 0xd0, 0xeb, 0x8b, 0x1d, 0xaf, 0x40, 0x58, 0x55, 0xb5, 0x0b,
+ 0xb8, 0x3b, 0xc5, 0x86, 0x66, 0x44, 0x80, 0xaf, 0x82, 0x22, 0x25, 0x98, 0xf9, 0x9e, 0xd2, 0xc0,
+ 0xb8, 0x9c, 0x48, 0x5a, 0x91, 0x5a, 0x4d, 0x0b, 0x43, 0xe1, 0xbf, 0x85, 0xc1, 0xf8, 0x45, 0x03,
+ 0x97, 0x5f, 0xc2, 0x34, 0xf0, 0xc5, 0xf8, 0x34, 0x70, 0xf3, 0x7c, 0x5d, 0x35, 0x67, 0x0e, 0x38,
+ 0x00, 0x57, 0x26, 0x3d, 0xc3, 0x9b, 0x03, 0x2d, 0x90, 0xa3, 0xa4, 0x73, 0x96, 0x57, 0xbf, 0xa2,
+ 0x22, 0xe4, 0x10, 0xe9, 0x20, 0x01, 0x36, 0xfe, 0x9c, 0xd1, 0xab, 0xe2, 0x2d, 0x20, 0x4c, 0xbc,
+ 0x9a, 0x37, 0x40, 0xa9, 0xcf, 0x08, 0x4d, 0xbd, 0x9c, 0x71, 0x19, 0xee, 0x2b, 0x3b, 0x8a, 0x3d,
+ 0xe0, 0x55, 0x90, 0xeb, 0x3b, 0x6d, 0xd5, 0x93, 0x71, 0xa8, 0xfb, 0x3b, 0x5b, 0x48, 0xd8, 0xa1,
+ 0x01, 0x8a, 0x5d, 0xea, 0xf7, 0x83, 0xe8, 0xd5, 0x07, 0xe2, 0xac, 0xef, 0x48, 0x0b, 0x52, 0x2b,
+ 0xd0, 0x07, 0x05, 0x22, 0x5e, 0x7b, 0x25, 0xbd, 0x77, 0xcf, 0x57, 0xc9, 0x38, 0x01, 0x53, 0x8e,
+ 0x0e, 0xa1, 0x52, 0xc6, 0xd5, 0x95, 0x36, 0x14, 0xc6, 0xa9, 0x3e, 0x50, 0xe3, 0xc5, 0x3c, 0x81,
+ 0xbc, 0x9d, 0x16, 0x48, 0x21, 0x77, 0x0b, 0x6d, 0x28, 0x99, 0x58, 0xd2, 0x92, 0xfa, 0x43, 0x76,
+ 0xba, 0x3b, 0xe5, 0x2c, 0x77, 0x00, 0x4a, 0x11, 0x3a, 0xee, 0xce, 0x33, 0x25, 0x9e, 0x1c, 0x4b,
+ 0x64, 0x41, 0x31, 0xa3, 0x54, 0x8b, 0xf4, 0xf8, 0xb6, 0x32, 0xab, 0x53, 0x76, 0x7d, 0x1b, 0xf7,
+ 0x26, 0xdb, 0x05, 0xcc, 0x98, 0xdf, 0x7a, 0xa0, 0x4c, 0xa3, 0xf2, 0x2a, 0x91, 0x78, 0xf7, 0x39,
+ 0x4f, 0xc9, 0xba, 0x38, 0x1a, 0xd6, 0xcb, 0xf1, 0x27, 0x4a, 0x02, 0x18, 0x7f, 0x69, 0xd3, 0xdd,
+ 0x1f, 0xca, 0x17, 0x64, 0x00, 0xd8, 0x91, 0xa2, 0x85, 0xf3, 0xe0, 0xb9, 0x77, 0x12, 0x2b, 0x63,
+ 0xf2, 0x38, 0xc5, 0x26, 0x86, 0x52, 0x61, 0x60, 0x37, 0x99, 0x3c, 0xce, 0x34, 0xf9, 0xcf, 0xbe,
+ 0xc1, 0xf3, 0x07, 0x11, 0xe3, 0x67, 0x0d, 0x5c, 0x88, 0x40, 0x2f, 0x40, 0xc1, 0xf6, 0xc6, 0x15,
+ 0xec, 0xac, 0xed, 0x37, 0x53, 0xb9, 0xac, 0xbb, 0x27, 0xa7, 0xb5, 0xcc, 0xe3, 0xd3, 0x5a, 0xe6,
+ 0xc9, 0x69, 0x2d, 0xf3, 0x68, 0x54, 0xd3, 0x4e, 0x46, 0x35, 0xed, 0xf1, 0xa8, 0xa6, 0x3d, 0x19,
+ 0xd5, 0xb4, 0xdf, 0x47, 0x35, 0xed, 0xdb, 0x3f, 0x6a, 0x99, 0xcf, 0xaf, 0x2f, 0xf2, 0xb7, 0xff,
+ 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x61, 0xc4, 0xab, 0x1d, 0x10, 0x00, 0x00,
+}
+
+func (m *BrokerTemplateInstance) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BrokerTemplateInstance) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BrokerTemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BrokerTemplateInstanceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BrokerTemplateInstanceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BrokerTemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *BrokerTemplateInstanceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BrokerTemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BrokerTemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.BindingIDs) > 0 {
+ for iNdEx := len(m.BindingIDs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.BindingIDs[iNdEx])
+ copy(dAtA[i:], m.BindingIDs[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingIDs[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ {
+ size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.TemplateInstance.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m ExtraValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m[iNdEx])
+ copy(dAtA[i:], m[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Parameter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Parameter) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.Required {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ i -= len(m.From)
+ copy(dAtA[i:], m.From)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.From)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Generate)
+ copy(dAtA[i:], m.Generate)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Generate)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.DisplayName)
+ copy(dAtA[i:], m.DisplayName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Template) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Template) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ObjectLabels) > 0 {
+ keysForObjectLabels := make([]string, 0, len(m.ObjectLabels))
+ for k := range m.ObjectLabels {
+ keysForObjectLabels = append(keysForObjectLabels, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels)
+ for iNdEx := len(keysForObjectLabels) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.ObjectLabels[string(keysForObjectLabels[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForObjectLabels[iNdEx])
+ copy(dAtA[i:], keysForObjectLabels[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForObjectLabels[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Parameters) > 0 {
+ for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Objects) > 0 {
+ for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstance) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstance) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceObject) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceObject) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceObject) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceRequester) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceRequester) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceRequester) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Extra) > 0 {
+ keysForExtra := make([]string, 0, len(m.Extra))
+ for k := range m.Extra {
+ keysForExtra = append(keysForExtra, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Extra[string(keysForExtra[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForExtra[iNdEx])
+ copy(dAtA[i:], keysForExtra[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Groups) > 0 {
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Groups[iNdEx])
+ copy(dAtA[i:], m.Groups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Username)
+ copy(dAtA[i:], m.Username)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Requester != nil {
+ {
+ size, err := m.Requester.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Secret != nil {
+ {
+ size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateInstanceStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateInstanceStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateInstanceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Objects) > 0 {
+ for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TemplateList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TemplateList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *BrokerTemplateInstance) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *BrokerTemplateInstanceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *BrokerTemplateInstanceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.TemplateInstance.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Secret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.BindingIDs) > 0 {
+ for _, s := range m.BindingIDs {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m ExtraValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, s := range m {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Parameter) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.DisplayName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Value)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Generate)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.From)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ return n
+}
+
+func (m *Template) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Objects) > 0 {
+ for _, e := range m.Objects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Parameters) > 0 {
+ for _, e := range m.Parameters {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ObjectLabels) > 0 {
+ for k, v := range m.ObjectLabels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *TemplateInstance) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TemplateInstanceCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TemplateInstanceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TemplateInstanceObject) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Ref.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *TemplateInstanceRequester) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Username)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Extra) > 0 {
+ for k, v := range m.Extra {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *TemplateInstanceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Template.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Secret != nil {
+ l = m.Secret.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Requester != nil {
+ l = m.Requester.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *TemplateInstanceStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Objects) > 0 {
+ for _, e := range m.Objects {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TemplateList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *BrokerTemplateInstance) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BrokerTemplateInstance{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BrokerTemplateInstanceSpec", "BrokerTemplateInstanceSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BrokerTemplateInstanceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]BrokerTemplateInstance{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BrokerTemplateInstance", "BrokerTemplateInstance", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&BrokerTemplateInstanceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BrokerTemplateInstanceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BrokerTemplateInstanceSpec{`,
+ `TemplateInstance:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TemplateInstance), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `BindingIDs:` + fmt.Sprintf("%v", this.BindingIDs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Parameter) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Parameter{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`,
+ `Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `Generate:` + fmt.Sprintf("%v", this.Generate) + `,`,
+ `From:` + fmt.Sprintf("%v", this.From) + `,`,
+ `Required:` + fmt.Sprintf("%v", this.Required) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Template) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForObjects := "[]RawExtension{"
+ for _, f := range this.Objects {
+ repeatedStringForObjects += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForObjects += "}"
+ repeatedStringForParameters := "[]Parameter{"
+ for _, f := range this.Parameters {
+ repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForParameters += "}"
+ keysForObjectLabels := make([]string, 0, len(this.ObjectLabels))
+ for k := range this.ObjectLabels {
+ keysForObjectLabels = append(keysForObjectLabels, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels)
+ mapStringForObjectLabels := "map[string]string{"
+ for _, k := range keysForObjectLabels {
+ mapStringForObjectLabels += fmt.Sprintf("%v: %v,", k, this.ObjectLabels[k])
+ }
+ mapStringForObjectLabels += "}"
+ s := strings.Join([]string{`&Template{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Objects:` + repeatedStringForObjects + `,`,
+ `Parameters:` + repeatedStringForParameters + `,`,
+ `ObjectLabels:` + mapStringForObjectLabels + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstance) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TemplateInstance{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TemplateInstanceSpec", "TemplateInstanceSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TemplateInstanceStatus", "TemplateInstanceStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TemplateInstanceCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]TemplateInstance{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TemplateInstance", "TemplateInstance", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&TemplateInstanceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceObject) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TemplateInstanceObject{`,
+ `Ref:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ref), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceRequester) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForExtra := make([]string, 0, len(this.Extra))
+ for k := range this.Extra {
+ keysForExtra = append(keysForExtra, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ mapStringForExtra := "map[string]ExtraValue{"
+ for _, k := range keysForExtra {
+ mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
+ }
+ mapStringForExtra += "}"
+ s := strings.Join([]string{`&TemplateInstanceRequester{`,
+ `Username:` + fmt.Sprintf("%v", this.Username) + `,`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `Extra:` + mapStringForExtra + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TemplateInstanceSpec{`,
+ `Template:` + strings.Replace(strings.Replace(this.Template.String(), "Template", "Template", 1), `&`, ``, 1) + `,`,
+ `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`,
+ `Requester:` + strings.Replace(this.Requester.String(), "TemplateInstanceRequester", "TemplateInstanceRequester", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateInstanceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]TemplateInstanceCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TemplateInstanceCondition", "TemplateInstanceCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ repeatedStringForObjects := "[]TemplateInstanceObject{"
+ for _, f := range this.Objects {
+ repeatedStringForObjects += strings.Replace(strings.Replace(f.String(), "TemplateInstanceObject", "TemplateInstanceObject", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForObjects += "}"
+ s := strings.Join([]string{`&TemplateInstanceStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `Objects:` + repeatedStringForObjects + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TemplateList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Template{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&TemplateList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *BrokerTemplateInstance) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BrokerTemplateInstance: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BrokerTemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BrokerTemplateInstanceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BrokerTemplateInstanceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BrokerTemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, BrokerTemplateInstance{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BrokerTemplateInstanceSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BrokerTemplateInstanceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BrokerTemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TemplateInstance", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TemplateInstance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BindingIDs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BindingIDs = append(m.BindingIDs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExtraValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ *m = append(*m, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Parameter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Parameter: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisplayName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generate", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Generate = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.From = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Required = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Template) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Template: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Objects = append(m.Objects, runtime.RawExtension{})
+ if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Parameters = append(m.Parameters, Parameter{})
+ if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectLabels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectLabels == nil {
+ m.ObjectLabels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ObjectLabels[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstance) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstance: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstanceCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = TemplateInstanceConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceList) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key (field number << 3 | wire type)
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 { // 4 = end-group, invalid at message top level
+ return fmt.Errorf("proto: TemplateInstanceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // ListMeta: length-delimited embedded message
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 { // guards against int overflow of iNdEx+msglen
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2: // Items: repeated TemplateInstance, one element per occurrence
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, TemplateInstance{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default: // unknown field: rewind to the tag and skip it wholesale
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceObject) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstanceObject: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceObject: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // Ref: length-delimited embedded ObjectReference
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default: // unknown field: rewind and skip
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceRequester) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstanceRequester: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceRequester: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // Username: length-delimited string
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2: // UID: length-delimited string
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3: // Groups: repeated string, one element per occurrence
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4: // Extra: map<string, ExtraValue>, encoded as repeated key/value entry messages
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Extra == nil { // lazily allocate the map on first entry
+ m.Extra = make(map[string]ExtraValue)
+ }
+ var mapkey string
+ mapvalue := &ExtraValue{}
+ for iNdEx < postIndex { // decode the entry's inner fields: 1 = key, 2 = value
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 { // map key (string)
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 { // map value (ExtraValue message)
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &ExtraValue{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else { // unknown entry field: skip within the entry bounds
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Extra[mapkey] = *mapvalue // store entry; missing key/value default to zero values per proto semantics
+ iNdEx = postIndex
+ default: // unknown field: rewind and skip
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceSpec) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstanceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // Template: embedded message (value field)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2: // Secret: optional pointer field, allocated on first occurrence
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Secret == nil {
+ m.Secret = &v11.LocalObjectReference{}
+ }
+ if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3: // Requester: optional pointer field, allocated on first occurrence
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Requester", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Requester == nil {
+ m.Requester = &TemplateInstanceRequester{}
+ }
+ if err := m.Requester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default: // unknown field: rewind and skip
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateInstanceStatus) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateInstanceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateInstanceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // Conditions: repeated TemplateInstanceCondition
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, TemplateInstanceCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2: // Objects: repeated TemplateInstanceObject
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Objects = append(m.Objects, TemplateInstanceObject{})
+ if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default: // unknown field: rewind and skip
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TemplateList) Unmarshal(dAtA []byte) error { // decodes protobuf wire-format bytes into m; machine-generated (gogo-style) — do not hand-edit
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l { // one iteration per encoded field
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TemplateList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1: // ListMeta: length-delimited embedded message
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2: // Items: repeated Template, one element per occurrence
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Template{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default: // unknown field: rewind and skip
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) { // returns the byte length of the next complete field (including nested groups) so unknown fields can be skipped
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 { // read varint field key
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0: // varint: consume bytes until the continuation bit clears
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1: // fixed 64-bit
+ iNdEx += 8
+ case 2: // length-delimited: read the length varint, then skip that many bytes
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3: // start group: nest one level deeper
+ depth++
+ case 4: // end group: must match a prior start group
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5: // fixed 32-bit
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 { // index overflowed; treat as malformed input
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 { // a complete top-level field has been consumed
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var ( // sentinel errors shared by all generated (un)marshal code in this file
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") // length prefix decoded to a negative value
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") // varint exceeded 64 bits
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") // end-group tag with no matching start-group
+)
diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto
new file mode 100644
index 0000000000..24b37bcd7e
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/generated.proto
@@ -0,0 +1,262 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.template.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/template/v1";
+
+// BrokerTemplateInstance holds the service broker-related state associated with
+// a TemplateInstance. BrokerTemplateInstance is part of an experimental API.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BrokerTemplateInstance {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec describes the state of this BrokerTemplateInstance.
+ optional BrokerTemplateInstanceSpec spec = 2;
+}
+
+// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message BrokerTemplateInstanceList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a list of BrokerTemplateInstances
+ repeated BrokerTemplateInstance items = 2;
+}
+
+// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.
+message BrokerTemplateInstanceSpec {
+ // templateinstance is a reference to a TemplateInstance object residing
+ // in a namespace.
+ optional k8s.io.api.core.v1.ObjectReference templateInstance = 1;
+
+ // secret is a reference to a Secret object residing in a namespace,
+ // containing the necessary template parameters.
+ optional k8s.io.api.core.v1.ObjectReference secret = 2;
+
+ // bindingids is a list of 'binding_id's provided during successive bind
+ // calls to the template service broker.
+ repeated string bindingIDs = 3;
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+ // items, if empty, will result in an empty slice
+
+ repeated string items = 1;
+}
+
+// Parameter defines a name/value variable that is to be processed during
+// the Template to Config transformation.
+message Parameter {
+ // Name must be set and it can be referenced in Template
+ // Items using ${PARAMETER_NAME}. Required.
+ optional string name = 1;
+
+ // Optional: The name that will show in UI instead of parameter 'Name'
+ optional string displayName = 2;
+
+ // Description of a parameter. Optional.
+ optional string description = 3;
+
+ // Value holds the Parameter data. If specified, the generator will be
+ // ignored. The value replaces all occurrences of the Parameter ${Name}
+ // expression during the Template to Config transformation. Optional.
+ optional string value = 4;
+
+ // generate specifies the generator to be used to generate random string
+ // from an input value specified by From field. The result string is
+ // stored into Value field. If empty, no generator is being used, leaving
+ // the result Value untouched. Optional.
+ //
+ // The only supported generator is "expression", which accepts a "from"
+ // value in the form of a simple regular expression containing the
+ // range expression "[a-zA-Z0-9]", and the length expression "a{length}".
+ //
+ // Examples:
+ //
+ // from | value
+ // -----------------------------
+ // "test[0-9]{1}x" | "test7x"
+ // "[0-1]{8}" | "01001100"
+ // "0x[A-F0-9]{4}" | "0xB3AF"
+ // "[a-zA-Z0-9]{8}" | "hW4yQU5i"
+ optional string generate = 5;
+
+ // From is an input value for the generator. Optional.
+ optional string from = 6;
+
+ // Optional: Indicates the parameter must have a value. Defaults to false.
+ optional bool required = 7;
+}
+
+// Template contains the inputs needed to produce a Config.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Template {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // message is an optional instructional message that will
+ // be displayed when this template is instantiated.
+ // This field should inform the user how to utilize the newly created resources.
+ // Parameter substitution will be performed on the message before being
+ // displayed so that generated credentials and other parameters can be
+ // included in the output.
+ optional string message = 2;
+
+ // objects is an array of resources to include in this template.
+ // If a namespace value is hardcoded in the object, it will be removed
+ // during template instantiation, however if the namespace value
+ // is, or contains, a ${PARAMETER_REFERENCE}, the resolved
+ // value after parameter substitution will be respected and the object
+ // will be created in that namespace.
+ // +kubebuilder:pruning:PreserveUnknownFields
+ repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3;
+
+ // parameters is an optional array of Parameters used during the
+ // Template to Config transformation.
+ repeated Parameter parameters = 4;
+
+// labels is an optional set of labels that are applied to every
+ // object during the Template to Config transformation.
+ map labels = 5;
+}
+
+// TemplateInstance requests and records the instantiation of a Template.
+// TemplateInstance is part of an experimental API.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message TemplateInstance {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec describes the desired state of this TemplateInstance.
+ optional TemplateInstanceSpec spec = 2;
+
+ // status describes the current state of this TemplateInstance.
+ // +optional
+ optional TemplateInstanceStatus status = 3;
+}
+
+// TemplateInstanceCondition contains condition information for a
+// TemplateInstance.
+message TemplateInstanceCondition {
+ // Type of the condition, currently Ready or InstantiateFailure.
+ optional string type = 1;
+
+ // Status of the condition, one of True, False or Unknown.
+ optional string status = 2;
+
+ // LastTransitionTime is the last time a condition status transitioned from
+ // one state to another.
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Reason is a brief machine readable explanation for the condition's last
+ // transition.
+ optional string reason = 4;
+
+ // Message is a human readable description of the details of the last
+ // transition, complementing reason.
+ optional string message = 5;
+}
+
+// TemplateInstanceList is a list of TemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message TemplateInstanceList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+// items is a list of TemplateInstances
+ repeated TemplateInstance items = 2;
+}
+
+// TemplateInstanceObject references an object created by a TemplateInstance.
+message TemplateInstanceObject {
+ // ref is a reference to the created object. When used under .spec, only
+ // name and namespace are used; these can contain references to parameters
+ // which will be substituted following the usual rules.
+ optional k8s.io.api.core.v1.ObjectReference ref = 1;
+}
+
+// TemplateInstanceRequester holds the identity of an agent requesting a
+// template instantiation.
+message TemplateInstanceRequester {
+ // username uniquely identifies this user among all active users.
+ optional string username = 1;
+
+ // uid is a unique value that identifies this user across time; if this user is
+ // deleted and another user by the same name is added, they will have
+ // different UIDs.
+ optional string uid = 2;
+
+ // groups represent the groups this user is a part of.
+ repeated string groups = 3;
+
+ // extra holds additional information provided by the authenticator.
+ map extra = 4;
+}
+
+// TemplateInstanceSpec describes the desired state of a TemplateInstance.
+message TemplateInstanceSpec {
+ // template is a full copy of the template for instantiation.
+ optional Template template = 1;
+
+ // secret is a reference to a Secret object containing the necessary
+ // template parameters.
+ optional k8s.io.api.core.v1.LocalObjectReference secret = 2;
+
+ // requester holds the identity of the agent requesting the template
+ // instantiation.
+ // +optional
+ optional TemplateInstanceRequester requester = 3;
+}
+
+// TemplateInstanceStatus describes the current state of a TemplateInstance.
+message TemplateInstanceStatus {
+ // conditions represent the latest available observations of a
+ // TemplateInstance's current state.
+ repeated TemplateInstanceCondition conditions = 1;
+
+ // Objects references the objects created by the TemplateInstance.
+ repeated TemplateInstanceObject objects = 2;
+}
+
+// TemplateList is a list of Template objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message TemplateList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+// items is a list of Templates
+ repeated Template items = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/template/v1/legacy.go b/vendor/github.com/openshift/api/template/v1/legacy.go
new file mode 100644
index 0000000000..9266f3ac9e
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/legacy.go
@@ -0,0 +1,24 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &Template{},
+ &TemplateList{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("TemplateConfig"), &Template{})
+ scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("ProcessedTemplate"), &Template{})
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/template/v1/register.go b/vendor/github.com/openshift/api/template/v1/register.go
new file mode 100644
index 0000000000..e34ff5610b
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/register.go
@@ -0,0 +1,43 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "template.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &Template{},
+ &TemplateList{},
+ &TemplateInstance{},
+ &TemplateInstanceList{},
+ &BrokerTemplateInstance{},
+ &BrokerTemplateInstanceList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go
new file mode 100644
index 0000000000..9d95912b28
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/types.go
@@ -0,0 +1,294 @@
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Template contains the inputs needed to produce a Config.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Template struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // message is an optional instructional message that will
+ // be displayed when this template is instantiated.
+ // This field should inform the user how to utilize the newly created resources.
+ // Parameter substitution will be performed on the message before being
+ // displayed so that generated credentials and other parameters can be
+ // included in the output.
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+
+ // objects is an array of resources to include in this template.
+ // If a namespace value is hardcoded in the object, it will be removed
+ // during template instantiation, however if the namespace value
+ // is, or contains, a ${PARAMETER_REFERENCE}, the resolved
+ // value after parameter substitution will be respected and the object
+ // will be created in that namespace.
+ // +kubebuilder:pruning:PreserveUnknownFields
+ Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"`
+
+ // parameters is an optional array of Parameters used during the
+ // Template to Config transformation.
+ Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"`
+
+ // labels is a optional set of labels that are applied to every
+ // object during the Template to Config transformation.
+ ObjectLabels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TemplateList is a list of Template objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type TemplateList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is a list of templates
+ Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Parameter defines a name/value variable that is to be processed during
+// the Template to Config transformation.
+type Parameter struct {
+ // Name must be set and it can be referenced in Template
+ // Items using ${PARAMETER_NAME}. Required.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Optional: The name that will show in UI instead of parameter 'Name'
+ DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"`
+
+ // Description of a parameter. Optional.
+ Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"`
+
+ // Value holds the Parameter data. If specified, the generator will be
+ // ignored. The value replaces all occurrences of the Parameter ${Name}
+ // expression during the Template to Config transformation. Optional.
+ Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"`
+
+ // generate specifies the generator to be used to generate random string
+ // from an input value specified by From field. The result string is
+ // stored into Value field. If empty, no generator is being used, leaving
+ // the result Value untouched. Optional.
+ //
+ // The only supported generator is "expression", which accepts a "from"
+ // value in the form of a simple regular expression containing the
+ // range expression "[a-zA-Z0-9]", and the length expression "a{length}".
+ //
+ // Examples:
+ //
+ // from | value
+ // -----------------------------
+ // "test[0-9]{1}x" | "test7x"
+ // "[0-1]{8}" | "01001100"
+ // "0x[A-F0-9]{4}" | "0xB3AF"
+ // "[a-zA-Z0-9]{8}" | "hW4yQU5i"
+ //
+ Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"`
+
+ // From is an input value for the generator. Optional.
+ From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"`
+
+ // Optional: Indicates the parameter must have a value. Defaults to false.
+ Required bool `json:"required,omitempty" protobuf:"varint,7,opt,name=required"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TemplateInstance requests and records the instantiation of a Template.
+// TemplateInstance is part of an experimental API.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type TemplateInstance struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec describes the desired state of this TemplateInstance.
+ Spec TemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // status describes the current state of this TemplateInstance.
+ // +optional
+ Status TemplateInstanceStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TemplateInstanceSpec describes the desired state of a TemplateInstance.
+type TemplateInstanceSpec struct {
+ // template is a full copy of the template for instantiation.
+ Template Template `json:"template" protobuf:"bytes,1,opt,name=template"`
+
+ // secret is a reference to a Secret object containing the necessary
+ // template parameters.
+ Secret *corev1.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+ // requester holds the identity of the agent requesting the template
+ // instantiation.
+ // +optional
+ Requester *TemplateInstanceRequester `json:"requester" protobuf:"bytes,3,opt,name=requester"`
+}
+
+// TemplateInstanceRequester holds the identity of an agent requesting a
+// template instantiation.
+type TemplateInstanceRequester struct {
+ // username uniquely identifies this user among all active users.
+ Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"`
+
+ // uid is a unique value that identifies this user across time; if this user is
+ // deleted and another user by the same name is added, they will have
+ // different UIDs.
+ UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"`
+
+ // groups represent the groups this user is a part of.
+ Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
+
+ // extra holds additional information provided by the authenticator.
+ Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// TemplateInstanceStatus describes the current state of a TemplateInstance.
+type TemplateInstanceStatus struct {
+ // conditions represent the latest available observations of a
+ // TemplateInstance's current state.
+ Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+
+ // Objects references the objects created by the TemplateInstance.
+ Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"`
+}
+
+// TemplateInstanceCondition contains condition information for a
+// TemplateInstance.
+type TemplateInstanceCondition struct {
+ // Type of the condition, currently Ready or InstantiateFailure.
+ Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"`
+ // Status of the condition, one of True, False or Unknown.
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+ // LastTransitionTime is the last time a condition status transitioned from
+ // one state to another.
+ LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Reason is a brief machine readable explanation for the condition's last
+ // transition.
+ Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"`
+ // Message is a human readable description of the details of the last
+ // transition, complementing reason.
+ Message string `json:"message" protobuf:"bytes,5,opt,name=message"`
+}
+
+// TemplateInstanceConditionType is the type of condition pertaining to a
+// TemplateInstance.
+type TemplateInstanceConditionType string
+
+const (
+ // TemplateInstanceReady indicates the readiness of the template
+ // instantiation.
+ TemplateInstanceReady TemplateInstanceConditionType = "Ready"
+ // TemplateInstanceInstantiateFailure indicates the failure of the template
+ // instantiation
+ TemplateInstanceInstantiateFailure TemplateInstanceConditionType = "InstantiateFailure"
+)
+
+// TemplateInstanceObject references an object created by a TemplateInstance.
+type TemplateInstanceObject struct {
+ // ref is a reference to the created object. When used under .spec, only
+ // name and namespace are used; these can contain references to parameters
+ // which will be substituted following the usual rules.
+ Ref corev1.ObjectReference `json:"ref,omitempty" protobuf:"bytes,1,opt,name=ref"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TemplateInstanceList is a list of TemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type TemplateInstanceList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of Templateinstances
+ Items []TemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BrokerTemplateInstance holds the service broker-related state associated with
+// a TemplateInstance. BrokerTemplateInstance is part of an experimental API.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BrokerTemplateInstance struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec describes the state of this BrokerTemplateInstance.
+ Spec BrokerTemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.
+type BrokerTemplateInstanceSpec struct {
+ // templateinstance is a reference to a TemplateInstance object residing
+ // in a namespace.
+ TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"`
+
+ // secret is a reference to a Secret object residing in a namespace,
+ // containing the necessary template parameters.
+ Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"`
+
+ // bindingids is a list of 'binding_id's provided during successive bind
+ // calls to the template service broker.
+ BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BrokerTemplateInstanceList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of BrokerTemplateInstances
+ Items []BrokerTemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..ff14f246bd
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go
@@ -0,0 +1,394 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerTemplateInstance) DeepCopyInto(out *BrokerTemplateInstance) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstance.
+func (in *BrokerTemplateInstance) DeepCopy() *BrokerTemplateInstance {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerTemplateInstance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BrokerTemplateInstance) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerTemplateInstanceList) DeepCopyInto(out *BrokerTemplateInstanceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]BrokerTemplateInstance, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceList.
+func (in *BrokerTemplateInstanceList) DeepCopy() *BrokerTemplateInstanceList {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerTemplateInstanceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BrokerTemplateInstanceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerTemplateInstanceSpec) DeepCopyInto(out *BrokerTemplateInstanceSpec) {
+ *out = *in
+ out.TemplateInstance = in.TemplateInstance
+ out.Secret = in.Secret
+ if in.BindingIDs != nil {
+ in, out := &in.BindingIDs, &out.BindingIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceSpec.
+func (in *BrokerTemplateInstanceSpec) DeepCopy() *BrokerTemplateInstanceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerTemplateInstanceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+ {
+ in := &in
+ *out = make(ExtraValue, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+ if in == nil {
+ return nil
+ }
+ out := new(ExtraValue)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Parameter) DeepCopyInto(out *Parameter) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter.
+func (in *Parameter) DeepCopy() *Parameter {
+ if in == nil {
+ return nil
+ }
+ out := new(Parameter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Template) DeepCopyInto(out *Template) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Objects != nil {
+ in, out := &in.Objects, &out.Objects
+ *out = make([]runtime.RawExtension, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make([]Parameter, len(*in))
+ copy(*out, *in)
+ }
+ if in.ObjectLabels != nil {
+ in, out := &in.ObjectLabels, &out.ObjectLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template.
+func (in *Template) DeepCopy() *Template {
+ if in == nil {
+ return nil
+ }
+ out := new(Template)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Template) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstance) DeepCopyInto(out *TemplateInstance) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstance.
+func (in *TemplateInstance) DeepCopy() *TemplateInstance {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstance)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TemplateInstance) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceCondition) DeepCopyInto(out *TemplateInstanceCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceCondition.
+func (in *TemplateInstanceCondition) DeepCopy() *TemplateInstanceCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceList) DeepCopyInto(out *TemplateInstanceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]TemplateInstance, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceList.
+func (in *TemplateInstanceList) DeepCopy() *TemplateInstanceList {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TemplateInstanceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceObject) DeepCopyInto(out *TemplateInstanceObject) {
+ *out = *in
+ out.Ref = in.Ref
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceObject.
+func (in *TemplateInstanceObject) DeepCopy() *TemplateInstanceObject {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceObject)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceRequester) DeepCopyInto(out *TemplateInstanceRequester) {
+ *out = *in
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Extra != nil {
+ in, out := &in.Extra, &out.Extra
+ *out = make(map[string]ExtraValue, len(*in))
+ for key, val := range *in {
+ var outVal []string
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = make(ExtraValue, len(*in))
+ copy(*out, *in)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceRequester.
+func (in *TemplateInstanceRequester) DeepCopy() *TemplateInstanceRequester {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceRequester)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceSpec) DeepCopyInto(out *TemplateInstanceSpec) {
+ *out = *in
+ in.Template.DeepCopyInto(&out.Template)
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(corev1.LocalObjectReference)
+ **out = **in
+ }
+ if in.Requester != nil {
+ in, out := &in.Requester, &out.Requester
+ *out = new(TemplateInstanceRequester)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceSpec.
+func (in *TemplateInstanceSpec) DeepCopy() *TemplateInstanceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateInstanceStatus) DeepCopyInto(out *TemplateInstanceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]TemplateInstanceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Objects != nil {
+ in, out := &in.Objects, &out.Objects
+ *out = make([]TemplateInstanceObject, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceStatus.
+func (in *TemplateInstanceStatus) DeepCopy() *TemplateInstanceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateInstanceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TemplateList) DeepCopyInto(out *TemplateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Template, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList.
+func (in *TemplateList) DeepCopy() *TemplateList {
+ if in == nil {
+ return nil
+ }
+ out := new(TemplateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TemplateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..8ed3822c8d
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,159 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_BrokerTemplateInstance = map[string]string{
+ "": "BrokerTemplateInstance holds the service broker-related state associated with a TemplateInstance. BrokerTemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec describes the state of this BrokerTemplateInstance.",
+}
+
+func (BrokerTemplateInstance) SwaggerDoc() map[string]string {
+ return map_BrokerTemplateInstance
+}
+
+var map_BrokerTemplateInstanceList = map[string]string{
+ "": "BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of BrokerTemplateInstances",
+}
+
+func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string {
+ return map_BrokerTemplateInstanceList
+}
+
+var map_BrokerTemplateInstanceSpec = map[string]string{
+ "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.",
+ "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.",
+ "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.",
+ "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.",
+}
+
+func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string {
+ return map_BrokerTemplateInstanceSpec
+}
+
+var map_Parameter = map[string]string{
+ "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.",
+ "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.",
+ "displayName": "Optional: The name that will show in UI instead of parameter 'Name'",
+ "description": "Description of a parameter. Optional.",
+ "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.",
+ "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value",
+ "from": "From is an input value for the generator. Optional.",
+ "required": "Optional: Indicates the parameter must have a value. Defaults to false.",
+}
+
+func (Parameter) SwaggerDoc() map[string]string {
+ return map_Parameter
+}
+
+var map_Template = map[string]string{
+ "": "Template contains the inputs needed to produce a Config.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "message": "message is an optional instructional message that will be displayed when this template is instantiated. This field should inform the user how to utilize the newly created resources. Parameter substitution will be performed on the message before being displayed so that generated credentials and other parameters can be included in the output.",
+ "objects": "objects is an array of resources to include in this template. If a namespace value is hardcoded in the object, it will be removed during template instantiation, however if the namespace value is, or contains, a ${PARAMETER_REFERENCE}, the resolved value after parameter substitution will be respected and the object will be created in that namespace.",
+ "parameters": "parameters is an optional array of Parameters used during the Template to Config transformation.",
+ "labels": "labels is a optional set of labels that are applied to every object during the Template to Config transformation.",
+}
+
+func (Template) SwaggerDoc() map[string]string {
+ return map_Template
+}
+
+var map_TemplateInstance = map[string]string{
+ "": "TemplateInstance requests and records the instantiation of a Template. TemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "spec describes the desired state of this TemplateInstance.",
+ "status": "status describes the current state of this TemplateInstance.",
+}
+
+func (TemplateInstance) SwaggerDoc() map[string]string {
+ return map_TemplateInstance
+}
+
+var map_TemplateInstanceCondition = map[string]string{
+ "": "TemplateInstanceCondition contains condition information for a TemplateInstance.",
+ "type": "Type of the condition, currently Ready or InstantiateFailure.",
+ "status": "Status of the condition, one of True, False or Unknown.",
+ "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.",
+ "reason": "Reason is a brief machine readable explanation for the condition's last transition.",
+ "message": "Message is a human readable description of the details of the last transition, complementing reason.",
+}
+
+func (TemplateInstanceCondition) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceCondition
+}
+
+var map_TemplateInstanceList = map[string]string{
+ "": "TemplateInstanceList is a list of TemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items is a list of Templateinstances",
+}
+
+func (TemplateInstanceList) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceList
+}
+
+var map_TemplateInstanceObject = map[string]string{
+ "": "TemplateInstanceObject references an object created by a TemplateInstance.",
+ "ref": "ref is a reference to the created object. When used under .spec, only name and namespace are used; these can contain references to parameters which will be substituted following the usual rules.",
+}
+
+func (TemplateInstanceObject) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceObject
+}
+
+var map_TemplateInstanceRequester = map[string]string{
+ "": "TemplateInstanceRequester holds the identity of an agent requesting a template instantiation.",
+ "username": "username uniquely identifies this user among all active users.",
+ "uid": "uid is a unique value that identifies this user across time; if this user is deleted and another user by the same name is added, they will have different UIDs.",
+ "groups": "groups represent the groups this user is a part of.",
+ "extra": "extra holds additional information provided by the authenticator.",
+}
+
+func (TemplateInstanceRequester) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceRequester
+}
+
+var map_TemplateInstanceSpec = map[string]string{
+ "": "TemplateInstanceSpec describes the desired state of a TemplateInstance.",
+ "template": "template is a full copy of the template for instantiation.",
+ "secret": "secret is a reference to a Secret object containing the necessary template parameters.",
+ "requester": "requester holds the identity of the agent requesting the template instantiation.",
+}
+
+func (TemplateInstanceSpec) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceSpec
+}
+
+var map_TemplateInstanceStatus = map[string]string{
+ "": "TemplateInstanceStatus describes the current state of a TemplateInstance.",
+ "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.",
+ "objects": "Objects references the objects created by the TemplateInstance.",
+}
+
+func (TemplateInstanceStatus) SwaggerDoc() map[string]string {
+ return map_TemplateInstanceStatus
+}
+
+var map_TemplateList = map[string]string{
+ "": "TemplateList is a list of Template objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of templates",
+}
+
+func (TemplateList) SwaggerDoc() map[string]string {
+ return map_TemplateList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/user/install.go b/vendor/github.com/openshift/api/user/install.go
new file mode 100644
index 0000000000..28d4980621
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/install.go
@@ -0,0 +1,26 @@
+package user
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ userv1 "github.com/openshift/api/user/v1"
+)
+
+const (
+ GroupName = "user.openshift.io"
+)
+
+var (
+ schemeBuilder = runtime.NewSchemeBuilder(userv1.Install)
+ // Install is a function which adds every version of this group to a scheme
+ Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+ return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/user/v1/doc.go b/vendor/github.com/openshift/api/user/v1/doc.go
new file mode 100644
index 0000000000..42287095e2
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/user/apis/user
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=user.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/user/v1/generated.pb.go b/vendor/github.com/openshift/api/user/v1/generated.pb.go
new file mode 100644
index 0000000000..0689ed3899
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/generated.pb.go
@@ -0,0 +1,2274 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/user/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Group) Reset() { *m = Group{} }
+func (*Group) ProtoMessage() {}
+func (*Group) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{0}
+}
+func (m *Group) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Group) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Group.Merge(m, src)
+}
+func (m *Group) XXX_Size() int {
+ return m.Size()
+}
+func (m *Group) XXX_DiscardUnknown() {
+ xxx_messageInfo_Group.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Group proto.InternalMessageInfo
+
+func (m *GroupList) Reset() { *m = GroupList{} }
+func (*GroupList) ProtoMessage() {}
+func (*GroupList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{1}
+}
+func (m *GroupList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *GroupList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupList.Merge(m, src)
+}
+func (m *GroupList) XXX_Size() int {
+ return m.Size()
+}
+func (m *GroupList) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupList proto.InternalMessageInfo
+
+func (m *Identity) Reset() { *m = Identity{} }
+func (*Identity) ProtoMessage() {}
+func (*Identity) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{2}
+}
+func (m *Identity) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Identity) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Identity.Merge(m, src)
+}
+func (m *Identity) XXX_Size() int {
+ return m.Size()
+}
+func (m *Identity) XXX_DiscardUnknown() {
+ xxx_messageInfo_Identity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Identity proto.InternalMessageInfo
+
+func (m *IdentityList) Reset() { *m = IdentityList{} }
+func (*IdentityList) ProtoMessage() {}
+func (*IdentityList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{3}
+}
+func (m *IdentityList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IdentityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IdentityList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IdentityList.Merge(m, src)
+}
+func (m *IdentityList) XXX_Size() int {
+ return m.Size()
+}
+func (m *IdentityList) XXX_DiscardUnknown() {
+ xxx_messageInfo_IdentityList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IdentityList proto.InternalMessageInfo
+
+func (m *OptionalNames) Reset() { *m = OptionalNames{} }
+func (*OptionalNames) ProtoMessage() {}
+func (*OptionalNames) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{4}
+}
+func (m *OptionalNames) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *OptionalNames) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OptionalNames.Merge(m, src)
+}
+func (m *OptionalNames) XXX_Size() int {
+ return m.Size()
+}
+func (m *OptionalNames) XXX_DiscardUnknown() {
+ xxx_messageInfo_OptionalNames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OptionalNames proto.InternalMessageInfo
+
+func (m *User) Reset() { *m = User{} }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{5}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *User) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_User.Merge(m, src)
+}
+func (m *User) XXX_Size() int {
+ return m.Size()
+}
+func (m *User) XXX_DiscardUnknown() {
+ xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+func (m *UserIdentityMapping) Reset() { *m = UserIdentityMapping{} }
+func (*UserIdentityMapping) ProtoMessage() {}
+func (*UserIdentityMapping) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{6}
+}
+func (m *UserIdentityMapping) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserIdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UserIdentityMapping) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserIdentityMapping.Merge(m, src)
+}
+func (m *UserIdentityMapping) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserIdentityMapping) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserIdentityMapping.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserIdentityMapping proto.InternalMessageInfo
+
+func (m *UserList) Reset() { *m = UserList{} }
+func (*UserList) ProtoMessage() {}
+func (*UserList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ea159b02d89a1362, []int{7}
+}
+func (m *UserList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UserList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserList.Merge(m, src)
+}
+func (m *UserList) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserList) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Group)(nil), "github.com.openshift.api.user.v1.Group")
+ proto.RegisterType((*GroupList)(nil), "github.com.openshift.api.user.v1.GroupList")
+ proto.RegisterType((*Identity)(nil), "github.com.openshift.api.user.v1.Identity")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.user.v1.Identity.ExtraEntry")
+ proto.RegisterType((*IdentityList)(nil), "github.com.openshift.api.user.v1.IdentityList")
+ proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.user.v1.OptionalNames")
+ proto.RegisterType((*User)(nil), "github.com.openshift.api.user.v1.User")
+ proto.RegisterType((*UserIdentityMapping)(nil), "github.com.openshift.api.user.v1.UserIdentityMapping")
+ proto.RegisterType((*UserList)(nil), "github.com.openshift.api.user.v1.UserList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/openshift/api/user/v1/generated.proto", fileDescriptor_ea159b02d89a1362)
+}
+
+var fileDescriptor_ea159b02d89a1362 = []byte{
+ // 726 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x13, 0x4b,
+ 0x14, 0xf5, 0xc4, 0xde, 0xc8, 0x9e, 0x38, 0x4f, 0xd6, 0xbe, 0x14, 0x2b, 0x17, 0x6b, 0x6b, 0x9f,
+ 0xf4, 0x88, 0x10, 0xcc, 0x26, 0x11, 0x20, 0x2b, 0xa5, 0x45, 0x82, 0x22, 0x12, 0x12, 0x46, 0xa2,
+ 0x89, 0x28, 0x98, 0xd8, 0xe3, 0xf5, 0x60, 0xef, 0x87, 0x76, 0x67, 0x2d, 0xdc, 0xe5, 0x27, 0x40,
+ 0x47, 0xc9, 0x9f, 0x40, 0x14, 0x88, 0x3e, 0x74, 0x29, 0x53, 0x20, 0x8b, 0x2c, 0x1d, 0xbf, 0x02,
+ 0xcd, 0xec, 0x87, 0xd7, 0xf9, 0x90, 0x23, 0x21, 0xb9, 0xdb, 0xb9, 0x73, 0xcf, 0x99, 0x73, 0xcf,
+ 0xbd, 0xd7, 0x32, 0xdc, 0xb0, 0x18, 0xef, 0x87, 0x27, 0xa8, 0xe3, 0xda, 0xa6, 0xeb, 0x51, 0x27,
+ 0xe8, 0xb3, 0x1e, 0x37, 0x89, 0xc7, 0xcc, 0x30, 0xa0, 0xbe, 0x39, 0xda, 0x34, 0x2d, 0xea, 0x50,
+ 0x9f, 0x70, 0xda, 0x45, 0x9e, 0xef, 0x72, 0x57, 0x6d, 0x4e, 0x11, 0x28, 0x43, 0x20, 0xe2, 0x31,
+ 0x24, 0x10, 0x68, 0xb4, 0x59, 0x7f, 0x98, 0xe3, 0xb4, 0x5c, 0xcb, 0x35, 0x25, 0xf0, 0x24, 0xec,
+ 0xc9, 0x93, 0x3c, 0xc8, 0xaf, 0x98, 0xb0, 0x6e, 0x0c, 0x5a, 0x01, 0x62, 0xae, 0x7c, 0xb4, 0xe3,
+ 0xfa, 0xf4, 0x86, 0x47, 0xeb, 0x8f, 0xa6, 0x39, 0x36, 0xe9, 0xf4, 0x99, 0x43, 0xfd, 0xb1, 0xe9,
+ 0x0d, 0x2c, 0x11, 0x08, 0x4c, 0x9b, 0x72, 0x72, 0x13, 0xea, 0xc9, 0x6d, 0x28, 0x3f, 0x74, 0x38,
+ 0xb3, 0xa9, 0x19, 0x74, 0xfa, 0xd4, 0x26, 0x57, 0x71, 0xc6, 0x57, 0x00, 0x95, 0x67, 0xbe, 0x1b,
+ 0x7a, 0xea, 0x1b, 0x58, 0x16, 0xe4, 0x5d, 0xc2, 0x89, 0x06, 0x9a, 0x60, 0x7d, 0x65, 0x6b, 0x03,
+ 0xc5, 0xa4, 0x28, 0x4f, 0x8a, 0xbc, 0x81, 0x25, 0x02, 0x01, 0x12, 0xd9, 0x68, 0xb4, 0x89, 0x0e,
+ 0x4f, 0xde, 0xd2, 0x0e, 0x3f, 0xa0, 0x9c, 0xb4, 0xd5, 0xb3, 0x49, 0xa3, 0x10, 0x4d, 0x1a, 0x70,
+ 0x1a, 0xc3, 0x19, 0xab, 0x7a, 0x04, 0x15, 0xe1, 0x5b, 0xa0, 0x2d, 0x49, 0x7a, 0x13, 0xcd, 0xb3,
+ 0x17, 0x1d, 0x7a, 0x9c, 0xb9, 0x0e, 0x19, 0xbe, 0x20, 0x36, 0x0d, 0xda, 0x95, 0x68, 0xd2, 0x50,
+ 0x5e, 0x09, 0x06, 0x1c, 0x13, 0x19, 0x5f, 0x00, 0xac, 0x48, 0xf5, 0xfb, 0x2c, 0xe0, 0xea, 0xeb,
+ 0x6b, 0x15, 0xa0, 0xbb, 0x55, 0x20, 0xd0, 0x52, 0x7f, 0x2d, 0xd1, 0x5f, 0x4e, 0x23, 0x39, 0xf5,
+ 0xfb, 0x50, 0x61, 0x9c, 0xda, 0x42, 0x7d, 0x71, 0x7d, 0x65, 0xeb, 0xde, 0x7c, 0xf5, 0x52, 0x59,
+ 0x7b, 0x35, 0xe1, 0x54, 0xf6, 0x04, 0x1a, 0xc7, 0x24, 0xc6, 0xf7, 0x22, 0x2c, 0xef, 0x75, 0xa9,
+ 0xc3, 0x19, 0x1f, 0x2f, 0xc0, 0xfa, 0x16, 0xac, 0x7a, 0xbe, 0x3b, 0x62, 0x5d, 0xea, 0x0b, 0x2f,
+ 0x65, 0x07, 0x2a, 0xed, 0xb5, 0x04, 0x53, 0x3d, 0xca, 0xdd, 0xe1, 0x99, 0x4c, 0xf5, 0x29, 0xac,
+ 0xa5, 0x67, 0x61, 0xbd, 0x44, 0x17, 0x25, 0x5a, 0x4b, 0xd0, 0xb5, 0xa3, 0x2b, 0xf7, 0xf8, 0x1a,
+ 0x42, 0xdd, 0x81, 0x25, 0xe1, 0x8a, 0x56, 0x92, 0xd5, 0xfd, 0x97, 0xab, 0x0e, 0x89, 0x3d, 0x98,
+ 0xd6, 0x82, 0x69, 0x8f, 0xfa, 0xd4, 0xe9, 0xd0, 0x76, 0x35, 0xa1, 0x2f, 0x09, 0x12, 0x2c, 0xe1,
+ 0xea, 0x31, 0x54, 0xe8, 0x3b, 0xee, 0x13, 0x4d, 0x91, 0x3d, 0x78, 0x3c, 0xbf, 0x07, 0xa9, 0xc7,
+ 0x68, 0x47, 0xe0, 0x76, 0x1c, 0xee, 0x8f, 0xa7, 0x1d, 0x91, 0x31, 0x1c, 0x53, 0xd6, 0x5b, 0x10,
+ 0x4e, 0x73, 0xd4, 0x1a, 0x2c, 0x0e, 0xe8, 0x58, 0x76, 0xa3, 0x82, 0xc5, 0xa7, 0xba, 0x06, 0x95,
+ 0x11, 0x19, 0x86, 0x89, 0x77, 0x38, 0x3e, 0x6c, 0x2f, 0xb5, 0x80, 0xf1, 0x0d, 0xc0, 0x6a, 0xfa,
+ 0xce, 0x02, 0x06, 0xf1, 0x70, 0x76, 0x10, 0xef, 0xdf, 0xdd, 0x84, 0x5b, 0x66, 0x71, 0x1b, 0xae,
+ 0xce, 0x2c, 0x9a, 0xda, 0x48, 0x5f, 0x00, 0xcd, 0xe2, 0x7a, 0x25, 0xde, 0xbb, 0x3c, 0x62, 0xbb,
+ 0xfc, 0xf1, 0x53, 0xa3, 0x70, 0xfa, 0xa3, 0x59, 0x30, 0x7e, 0x03, 0x28, 0x1b, 0xb4, 0x80, 0x19,
+ 0x7e, 0x00, 0xcb, 0xbd, 0x70, 0x38, 0xcc, 0xcd, 0x6f, 0xe6, 0xd2, 0x6e, 0x12, 0xc7, 0x59, 0x86,
+ 0x8a, 0x20, 0x64, 0x71, 0xd9, 0x8c, 0x06, 0x5a, 0x51, 0x16, 0xf2, 0x8f, 0xe0, 0xde, 0xcb, 0xa2,
+ 0x38, 0x97, 0xa1, 0x1a, 0x70, 0xd9, 0x12, 0xfb, 0x1a, 0x68, 0x25, 0x99, 0x0b, 0xa3, 0x49, 0x63,
+ 0x59, 0x6e, 0x70, 0x80, 0x93, 0x1b, 0xe3, 0xc3, 0x12, 0xfc, 0x57, 0x14, 0x9b, 0xfa, 0x79, 0x40,
+ 0x3c, 0x8f, 0x39, 0xd6, 0x02, 0x6a, 0x7f, 0x09, 0xcb, 0x89, 0xd6, 0x71, 0xf2, 0xeb, 0x79, 0xa7,
+ 0x1d, 0xca, 0x0c, 0x4a, 0x15, 0xe3, 0x8c, 0x26, 0x5b, 0xc9, 0xe2, 0x5f, 0xad, 0xa4, 0xf1, 0x19,
+ 0xc0, 0xb2, 0x38, 0x2e, 0x60, 0xf0, 0x9f, 0xcf, 0x0e, 0xfe, 0xff, 0xf3, 0x07, 0x5f, 0x08, 0xbb,
+ 0x79, 0xe8, 0xdb, 0xbb, 0x67, 0x97, 0x7a, 0xe1, 0xfc, 0x52, 0x2f, 0x5c, 0x5c, 0xea, 0x85, 0xd3,
+ 0x48, 0x07, 0x67, 0x91, 0x0e, 0xce, 0x23, 0x1d, 0x5c, 0x44, 0x3a, 0xf8, 0x19, 0xe9, 0xe0, 0xfd,
+ 0x2f, 0xbd, 0x70, 0xdc, 0x9c, 0xf7, 0x9f, 0xe1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x85,
+ 0x81, 0x86, 0x56, 0x08, 0x00, 0x00,
+}
+
+func (m *Group) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Group) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Users != nil {
+ {
+ size, err := m.Users.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *GroupList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GroupList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Identity) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Identity) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Identity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Extra) > 0 {
+ keysForExtra := make([]string, 0, len(m.Extra))
+ for k := range m.Extra {
+ keysForExtra = append(keysForExtra, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Extra[string(keysForExtra[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForExtra[iNdEx])
+ copy(dAtA[i:], keysForExtra[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ {
+ size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.ProviderUserName)
+ copy(dAtA[i:], m.ProviderUserName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderUserName)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.ProviderName)
+ copy(dAtA[i:], m.ProviderName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IdentityList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IdentityList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IdentityList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m OptionalNames) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m[iNdEx])
+ copy(dAtA[i:], m[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *User) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Groups) > 0 {
+ for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Groups[iNdEx])
+ copy(dAtA[i:], m.Groups[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Identities) > 0 {
+ for iNdEx := len(m.Identities) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Identities[iNdEx])
+ copy(dAtA[i:], m.Identities[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identities[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ i -= len(m.FullName)
+ copy(dAtA[i:], m.FullName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullName)))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *UserIdentityMapping) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserIdentityMapping) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserIdentityMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Identity.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *UserList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Group) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Users != nil {
+ l = m.Users.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *GroupList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Identity) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ProviderName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ProviderUserName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.User.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Extra) > 0 {
+ for k, v := range m.Extra {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *IdentityList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m OptionalNames) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, s := range m {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *User) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.FullName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Identities) > 0 {
+ for _, s := range m.Identities {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *UserIdentityMapping) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Identity.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.User.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *UserList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Group) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Group{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Users:` + strings.Replace(fmt.Sprintf("%v", this.Users), "OptionalNames", "OptionalNames", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GroupList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Group{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Group", "Group", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&GroupList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Identity) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForExtra := make([]string, 0, len(this.Extra))
+ for k := range this.Extra {
+ keysForExtra = append(keysForExtra, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ mapStringForExtra := "map[string]string{"
+ for _, k := range keysForExtra {
+ mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
+ }
+ mapStringForExtra += "}"
+ s := strings.Join([]string{`&Identity{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `ProviderName:` + fmt.Sprintf("%v", this.ProviderName) + `,`,
+ `ProviderUserName:` + fmt.Sprintf("%v", this.ProviderUserName) + `,`,
+ `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `Extra:` + mapStringForExtra + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IdentityList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Identity{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Identity", "Identity", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&IdentityList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *User) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&User{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `FullName:` + fmt.Sprintf("%v", this.FullName) + `,`,
+ `Identities:` + fmt.Sprintf("%v", this.Identities) + `,`,
+ `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UserIdentityMapping) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UserIdentityMapping{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Identity:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Identity), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *UserList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]User{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "User", "User", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&UserList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Group) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Group: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Users == nil {
+ m.Users = OptionalNames{}
+ }
+ if err := m.Users.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GroupList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GroupList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GroupList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Group{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Identity) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Identity: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Identity: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProviderName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProviderUserName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProviderUserName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Extra == nil {
+ m.Extra = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Extra[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IdentityList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IdentityList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IdentityList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, Identity{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *OptionalNames) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ *m = append(*m, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: User: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FullName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Identities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Identities = append(m.Identities, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UserIdentityMapping) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserIdentityMapping: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserIdentityMapping: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Identity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UserList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, User{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto
new file mode 100644
index 0000000000..5b8a2eb12c
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/generated.proto
@@ -0,0 +1,144 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.user.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/user/v1";
+
+// Group represents a referenceable set of Users
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Group {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Users is the list of users in this group.
+ optional OptionalNames users = 2;
+}
+
+// GroupList is a collection of Groups
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message GroupList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of groups
+ repeated Group items = 2;
+}
+
+// Identity records a successful authentication of a user with an identity provider. The
+// information about the source of authentication is stored on the identity, and the identity
+// is then associated with a single user object. Multiple identities can reference a single
+// user. Information retrieved from the authentication provider is stored in the extra field
+// using a schema determined by the provider.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message Identity {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // ProviderName is the source of identity information
+ optional string providerName = 2;
+
+ // ProviderUserName uniquely represents this identity in the scope of the provider
+ optional string providerUserName = 3;
+
+ // User is a reference to the user this identity is associated with
+ // Both Name and UID must be set
+ optional k8s.io.api.core.v1.ObjectReference user = 4;
+
+ // Extra holds extra information about this identity
+  map<string, string> extra = 5;
+}
+
+// IdentityList is a collection of Identities
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message IdentityList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of identities
+ repeated Identity items = 2;
+}
+
+// OptionalNames is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalNames {
+ // items, if empty, will result in an empty slice
+
+ repeated string items = 1;
+}
+
+// Upon log in, every user of the system receives a User and Identity resource. Administrators
+// may directly manipulate the attributes of the users for their own tracking, or set groups
+// via the API. The user name is unique and is chosen based on the value provided by the
+// identity provider - if a user already exists with the incoming name, the user name may have
+// a number appended to it depending on the configuration of the system.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message User {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // FullName is the full name of user
+ optional string fullName = 2;
+
+ // Identities are the identities associated with this user
+ // +optional
+ repeated string identities = 3;
+
+ // Groups specifies group names this user is a member of.
+ // This field is deprecated and will be removed in a future release.
+ // Instead, create a Group object containing the name of this User.
+ repeated string groups = 4;
+}
+
+// UserIdentityMapping maps a user to an identity
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message UserIdentityMapping {
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Identity is a reference to an identity
+ optional k8s.io.api.core.v1.ObjectReference identity = 2;
+
+ // User is a reference to a user
+ optional k8s.io.api.core.v1.ObjectReference user = 3;
+}
+
+// UserList is a collection of Users
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message UserList {
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of users
+ repeated User items = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/user/v1/legacy.go b/vendor/github.com/openshift/api/user/v1/legacy.go
new file mode 100644
index 0000000000..6817a9f1f3
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/legacy.go
@@ -0,0 +1,27 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+ legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+ DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+ types := []runtime.Object{
+ &User{},
+ &UserList{},
+ &Identity{},
+ &IdentityList{},
+ &UserIdentityMapping{},
+ &Group{},
+ &GroupList{},
+ }
+ scheme.AddKnownTypes(legacyGroupVersion, types...)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/user/v1/register.go b/vendor/github.com/openshift/api/user/v1/register.go
new file mode 100644
index 0000000000..11341d72a9
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/register.go
@@ -0,0 +1,44 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "user.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(GroupVersion,
+ &User{},
+ &UserList{},
+ &Identity{},
+ &IdentityList{},
+ &UserIdentityMapping{},
+ &Group{},
+ &GroupList{},
+ )
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go
new file mode 100644
index 0000000000..7014bbfac7
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/types.go
@@ -0,0 +1,174 @@
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Upon log in, every user of the system receives a User and Identity resource. Administrators
+// may directly manipulate the attributes of the users for their own tracking, or set groups
+// via the API. The user name is unique and is chosen based on the value provided by the
+// identity provider - if a user already exists with the incoming name, the user name may have
+// a number appended to it depending on the configuration of the system.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type User struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // FullName is the full name of user
+ FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"`
+
+ // Identities are the identities associated with this user
+ // +optional
+ Identities []string `json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"`
+
+ // Groups specifies group names this user is a member of.
+ // This field is deprecated and will be removed in a future release.
+ // Instead, create a Group object containing the name of this User.
+ Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UserList is a collection of Users
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type UserList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of users
+ Items []User `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Identity records a successful authentication of a user with an identity provider. The
+// information about the source of authentication is stored on the identity, and the identity
+// is then associated with a single user object. Multiple identities can reference a single
+// user. Information retrieved from the authentication provider is stored in the extra field
+// using a schema determined by the provider.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Identity struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // ProviderName is the source of identity information
+ ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"`
+
+ // ProviderUserName uniquely represents this identity in the scope of the provider
+ ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"`
+
+ // User is a reference to the user this identity is associated with
+ // Both Name and UID must be set
+ User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"`
+
+ // Extra holds extra information about this identity
+ Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IdentityList is a collection of Identities
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type IdentityList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of identities
+ Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:onlyVerbs=get,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UserIdentityMapping maps a user to an identity
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type UserIdentityMapping struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Identity is a reference to an identity
+ Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"`
+ // User is a reference to a user
+ User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+}
+
+// OptionalNames is an array that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type OptionalNames []string
+
+func (t OptionalNames) String() string {
+ return fmt.Sprintf("%v", []string(t))
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Group represents a referenceable set of Users
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Group struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Users is the list of users in this group.
+ Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GroupList is a collection of Groups
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type GroupList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of groups
+ Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..e6b2fb867c
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go
@@ -0,0 +1,258 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Group) DeepCopyInto(out *Group) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Users != nil {
+ in, out := &in.Users, &out.Users
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group.
+func (in *Group) DeepCopy() *Group {
+ if in == nil {
+ return nil
+ }
+ out := new(Group)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Group) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupList) DeepCopyInto(out *GroupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Group, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList.
+func (in *GroupList) DeepCopy() *GroupList {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GroupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Identity) DeepCopyInto(out *Identity) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.User = in.User
+ if in.Extra != nil {
+ in, out := &in.Extra, &out.Extra
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identity.
+func (in *Identity) DeepCopy() *Identity {
+ if in == nil {
+ return nil
+ }
+ out := new(Identity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Identity) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityList) DeepCopyInto(out *IdentityList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Identity, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityList.
+func (in *IdentityList) DeepCopy() *IdentityList {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IdentityList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in OptionalNames) DeepCopyInto(out *OptionalNames) {
+ {
+ in := &in
+ *out = make(OptionalNames, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames.
+func (in OptionalNames) DeepCopy() OptionalNames {
+ if in == nil {
+ return nil
+ }
+ out := new(OptionalNames)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *User) DeepCopyInto(out *User) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Identities != nil {
+ in, out := &in.Identities, &out.Identities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Groups != nil {
+ in, out := &in.Groups, &out.Groups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.
+func (in *User) DeepCopy() *User {
+ if in == nil {
+ return nil
+ }
+ out := new(User)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *User) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserIdentityMapping) DeepCopyInto(out *UserIdentityMapping) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Identity = in.Identity
+ out.User = in.User
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityMapping.
+func (in *UserIdentityMapping) DeepCopy() *UserIdentityMapping {
+ if in == nil {
+ return nil
+ }
+ out := new(UserIdentityMapping)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserIdentityMapping) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserList) DeepCopyInto(out *UserList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]User, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList.
+func (in *UserList) DeepCopy() *UserList {
+ if in == nil {
+ return nil
+ }
+ out := new(UserList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UserList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..5844723a72
--- /dev/null
+++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,90 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Group = map[string]string{
+ "": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "users": "Users is the list of users in this group.",
+}
+
+func (Group) SwaggerDoc() map[string]string {
+ return map_Group
+}
+
+var map_GroupList = map[string]string{
+ "": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of groups",
+}
+
+func (GroupList) SwaggerDoc() map[string]string {
+ return map_GroupList
+}
+
+var map_Identity = map[string]string{
+ "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "providerName": "ProviderName is the source of identity information",
+ "providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider",
+ "user": "User is a reference to the user this identity is associated with Both Name and UID must be set",
+ "extra": "Extra holds extra information about this identity",
+}
+
+func (Identity) SwaggerDoc() map[string]string {
+ return map_Identity
+}
+
+var map_IdentityList = map[string]string{
+ "": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of identities",
+}
+
+func (IdentityList) SwaggerDoc() map[string]string {
+ return map_IdentityList
+}
+
+var map_User = map[string]string{
+ "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "fullName": "FullName is the full name of user",
+ "identities": "Identities are the identities associated with this user",
+ "groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.",
+}
+
+func (User) SwaggerDoc() map[string]string {
+ return map_User
+}
+
+var map_UserIdentityMapping = map[string]string{
+ "": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "identity": "Identity is a reference to an identity",
+ "user": "User is a reference to a user",
+}
+
+func (UserIdentityMapping) SwaggerDoc() map[string]string {
+ return map_UserIdentityMapping
+}
+
+var map_UserList = map[string]string{
+ "": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of users",
+}
+
+func (UserList) SwaggerDoc() map[string]string {
+ return map_UserList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go
new file mode 100644
index 0000000000..d265d749c1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudplatformstatus.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AlibabaCloudPlatformStatusApplyConfiguration represents an declarative configuration of the AlibabaCloudPlatformStatus type for use
+// with apply.
+type AlibabaCloudPlatformStatusApplyConfiguration struct {
+ Region *string `json:"region,omitempty"`
+ ResourceGroupID *string `json:"resourceGroupID,omitempty"`
+ ResourceTags []AlibabaCloudResourceTagApplyConfiguration `json:"resourceTags,omitempty"`
+}
+
+// AlibabaCloudPlatformStatusApplyConfiguration constructs an declarative configuration of the AlibabaCloudPlatformStatus type for use with
+// apply.
+func AlibabaCloudPlatformStatus() *AlibabaCloudPlatformStatusApplyConfiguration {
+ return &AlibabaCloudPlatformStatusApplyConfiguration{}
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithRegion(value string) *AlibabaCloudPlatformStatusApplyConfiguration {
+ b.Region = &value
+ return b
+}
+
+// WithResourceGroupID sets the ResourceGroupID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroupID field is set to the value of the last call.
+func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithResourceGroupID(value string) *AlibabaCloudPlatformStatusApplyConfiguration {
+ b.ResourceGroupID = &value
+ return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AlibabaCloudPlatformStatusApplyConfiguration) WithResourceTags(values ...*AlibabaCloudResourceTagApplyConfiguration) *AlibabaCloudPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceTags")
+ }
+ b.ResourceTags = append(b.ResourceTags, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go
new file mode 100644
index 0000000000..7400289389
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/alibabacloudresourcetag.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AlibabaCloudResourceTagApplyConfiguration represents an declarative configuration of the AlibabaCloudResourceTag type for use
+// with apply.
+type AlibabaCloudResourceTagApplyConfiguration struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// AlibabaCloudResourceTagApplyConfiguration constructs an declarative configuration of the AlibabaCloudResourceTag type for use with
+// apply.
+func AlibabaCloudResourceTag() *AlibabaCloudResourceTagApplyConfiguration {
+ return &AlibabaCloudResourceTagApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *AlibabaCloudResourceTagApplyConfiguration) WithKey(value string) *AlibabaCloudResourceTagApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *AlibabaCloudResourceTagApplyConfiguration) WithValue(value string) *AlibabaCloudResourceTagApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go
new file mode 100644
index 0000000000..582186356f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// APIServerApplyConfiguration represents an declarative configuration of the APIServer type for use
+// with apply.
+type APIServerApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *APIServerSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.APIServerStatus `json:"status,omitempty"`
+}
+
+// APIServer constructs an declarative configuration of the APIServer type for use with
+// apply.
+func APIServer(name string) *APIServerApplyConfiguration {
+ b := &APIServerApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("APIServer")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractAPIServer extracts the applied configuration owned by fieldManager from
+// aPIServer. If no managedFields are found in aPIServer for fieldManager, a
+// APIServerApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// aPIServer must be a unmodified APIServer API object that was retrieved from the Kubernetes API.
+// ExtractAPIServer provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractAPIServer(aPIServer *apiconfigv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) {
+ return extractAPIServer(aPIServer, fieldManager, "")
+}
+
+// ExtractAPIServerStatus is the same as ExtractAPIServer except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractAPIServerStatus(aPIServer *apiconfigv1.APIServer, fieldManager string) (*APIServerApplyConfiguration, error) {
+ return extractAPIServer(aPIServer, fieldManager, "status")
+}
+
+func extractAPIServer(aPIServer *apiconfigv1.APIServer, fieldManager string, subresource string) (*APIServerApplyConfiguration, error) {
+ b := &APIServerApplyConfiguration{}
+ err := managedfields.ExtractInto(aPIServer, internal.Parser().Type("com.github.openshift.api.config.v1.APIServer"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(aPIServer.Name)
+
+ b.WithKind("APIServer")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithKind(value string) *APIServerApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithAPIVersion(value string) *APIServerApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithName(value string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithGenerateName(value string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithNamespace(value string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithUID(value types.UID) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithResourceVersion(value string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithGeneration(value int64) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *APIServerApplyConfiguration) WithLabels(entries map[string]string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *APIServerApplyConfiguration) WithAnnotations(entries map[string]string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *APIServerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *APIServerApplyConfiguration) WithFinalizers(values ...string) *APIServerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *APIServerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithSpec(value *APIServerSpecApplyConfiguration) *APIServerApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *APIServerApplyConfiguration) WithStatus(value apiconfigv1.APIServerStatus) *APIServerApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go
new file mode 100644
index 0000000000..7e5de50b59
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverencryption.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// APIServerEncryptionApplyConfiguration represents an declarative configuration of the APIServerEncryption type for use
+// with apply.
+type APIServerEncryptionApplyConfiguration struct {
+ Type *v1.EncryptionType `json:"type,omitempty"`
+}
+
+// APIServerEncryptionApplyConfiguration constructs an declarative configuration of the APIServerEncryption type for use with
+// apply.
+func APIServerEncryption() *APIServerEncryptionApplyConfiguration {
+ return &APIServerEncryptionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *APIServerEncryptionApplyConfiguration) WithType(value v1.EncryptionType) *APIServerEncryptionApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go
new file mode 100644
index 0000000000..b55943a41e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiservernamedservingcert.go
@@ -0,0 +1,34 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// APIServerNamedServingCertApplyConfiguration represents an declarative configuration of the APIServerNamedServingCert type for use
+// with apply.
+type APIServerNamedServingCertApplyConfiguration struct {
+ Names []string `json:"names,omitempty"`
+ ServingCertificate *SecretNameReferenceApplyConfiguration `json:"servingCertificate,omitempty"`
+}
+
+// APIServerNamedServingCertApplyConfiguration constructs an declarative configuration of the APIServerNamedServingCert type for use with
+// apply.
+func APIServerNamedServingCert() *APIServerNamedServingCertApplyConfiguration {
+ return &APIServerNamedServingCertApplyConfiguration{}
+}
+
+// WithNames adds the given value to the Names field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Names field.
+func (b *APIServerNamedServingCertApplyConfiguration) WithNames(values ...string) *APIServerNamedServingCertApplyConfiguration {
+ for i := range values {
+ b.Names = append(b.Names, values[i])
+ }
+ return b
+}
+
+// WithServingCertificate sets the ServingCertificate field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServingCertificate field is set to the value of the last call.
+func (b *APIServerNamedServingCertApplyConfiguration) WithServingCertificate(value *SecretNameReferenceApplyConfiguration) *APIServerNamedServingCertApplyConfiguration {
+ b.ServingCertificate = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go
new file mode 100644
index 0000000000..6a7084248c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverservingcerts.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// APIServerServingCertsApplyConfiguration represents an declarative configuration of the APIServerServingCerts type for use
+// with apply.
+type APIServerServingCertsApplyConfiguration struct {
+ NamedCertificates []APIServerNamedServingCertApplyConfiguration `json:"namedCertificates,omitempty"`
+}
+
+// APIServerServingCertsApplyConfiguration constructs an declarative configuration of the APIServerServingCerts type for use with
+// apply.
+func APIServerServingCerts() *APIServerServingCertsApplyConfiguration {
+ return &APIServerServingCertsApplyConfiguration{}
+}
+
+// WithNamedCertificates adds the given value to the NamedCertificates field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NamedCertificates field.
+func (b *APIServerServingCertsApplyConfiguration) WithNamedCertificates(values ...*APIServerNamedServingCertApplyConfiguration) *APIServerServingCertsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithNamedCertificates")
+ }
+ b.NamedCertificates = append(b.NamedCertificates, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go
new file mode 100644
index 0000000000..3e9eaeac05
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserverspec.go
@@ -0,0 +1,70 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// APIServerSpecApplyConfiguration represents an declarative configuration of the APIServerSpec type for use
+// with apply.
+type APIServerSpecApplyConfiguration struct {
+ ServingCerts *APIServerServingCertsApplyConfiguration `json:"servingCerts,omitempty"`
+ ClientCA *ConfigMapNameReferenceApplyConfiguration `json:"clientCA,omitempty"`
+ AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"`
+ Encryption *APIServerEncryptionApplyConfiguration `json:"encryption,omitempty"`
+ TLSSecurityProfile *TLSSecurityProfileApplyConfiguration `json:"tlsSecurityProfile,omitempty"`
+ Audit *AuditApplyConfiguration `json:"audit,omitempty"`
+}
+
+// APIServerSpecApplyConfiguration constructs an declarative configuration of the APIServerSpec type for use with
+// apply.
+func APIServerSpec() *APIServerSpecApplyConfiguration {
+ return &APIServerSpecApplyConfiguration{}
+}
+
+// WithServingCerts sets the ServingCerts field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServingCerts field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithServingCerts(value *APIServerServingCertsApplyConfiguration) *APIServerSpecApplyConfiguration {
+ b.ServingCerts = value
+ return b
+}
+
+// WithClientCA sets the ClientCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientCA field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithClientCA(value *ConfigMapNameReferenceApplyConfiguration) *APIServerSpecApplyConfiguration {
+ b.ClientCA = value
+ return b
+}
+
+// WithAdditionalCORSAllowedOrigins adds the given value to the AdditionalCORSAllowedOrigins field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AdditionalCORSAllowedOrigins field.
+func (b *APIServerSpecApplyConfiguration) WithAdditionalCORSAllowedOrigins(values ...string) *APIServerSpecApplyConfiguration {
+ for i := range values {
+ b.AdditionalCORSAllowedOrigins = append(b.AdditionalCORSAllowedOrigins, values[i])
+ }
+ return b
+}
+
+// WithEncryption sets the Encryption field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Encryption field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithEncryption(value *APIServerEncryptionApplyConfiguration) *APIServerSpecApplyConfiguration {
+ b.Encryption = value
+ return b
+}
+
+// WithTLSSecurityProfile sets the TLSSecurityProfile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSSecurityProfile field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithTLSSecurityProfile(value *TLSSecurityProfileApplyConfiguration) *APIServerSpecApplyConfiguration {
+ b.TLSSecurityProfile = value
+ return b
+}
+
+// WithAudit sets the Audit field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Audit field is set to the value of the last call.
+func (b *APIServerSpecApplyConfiguration) WithAudit(value *AuditApplyConfiguration) *APIServerSpecApplyConfiguration {
+ b.Audit = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go
new file mode 100644
index 0000000000..8db029e26a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/audit.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// AuditApplyConfiguration represents an declarative configuration of the Audit type for use
+// with apply.
+type AuditApplyConfiguration struct {
+ Profile *v1.AuditProfileType `json:"profile,omitempty"`
+ CustomRules []AuditCustomRuleApplyConfiguration `json:"customRules,omitempty"`
+}
+
+// AuditApplyConfiguration constructs an declarative configuration of the Audit type for use with
+// apply.
+func Audit() *AuditApplyConfiguration {
+ return &AuditApplyConfiguration{}
+}
+
+// WithProfile sets the Profile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Profile field is set to the value of the last call.
+func (b *AuditApplyConfiguration) WithProfile(value v1.AuditProfileType) *AuditApplyConfiguration {
+ b.Profile = &value
+ return b
+}
+
+// WithCustomRules adds the given value to the CustomRules field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the CustomRules field.
+func (b *AuditApplyConfiguration) WithCustomRules(values ...*AuditCustomRuleApplyConfiguration) *AuditApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithCustomRules")
+ }
+ b.CustomRules = append(b.CustomRules, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go
new file mode 100644
index 0000000000..80719443e4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/auditcustomrule.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// AuditCustomRuleApplyConfiguration represents an declarative configuration of the AuditCustomRule type for use
+// with apply.
+type AuditCustomRuleApplyConfiguration struct {
+ Group *string `json:"group,omitempty"`
+ Profile *v1.AuditProfileType `json:"profile,omitempty"`
+}
+
+// AuditCustomRuleApplyConfiguration constructs an declarative configuration of the AuditCustomRule type for use with
+// apply.
+func AuditCustomRule() *AuditCustomRuleApplyConfiguration {
+ return &AuditCustomRuleApplyConfiguration{}
+}
+
+// WithGroup sets the Group field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Group field is set to the value of the last call.
+func (b *AuditCustomRuleApplyConfiguration) WithGroup(value string) *AuditCustomRuleApplyConfiguration {
+ b.Group = &value
+ return b
+}
+
+// WithProfile sets the Profile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Profile field is set to the value of the last call.
+func (b *AuditCustomRuleApplyConfiguration) WithProfile(value v1.AuditProfileType) *AuditCustomRuleApplyConfiguration {
+ b.Profile = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go
new file mode 100644
index 0000000000..5f55198562
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// AuthenticationApplyConfiguration represents an declarative configuration of the Authentication type for use
+// with apply.
+type AuthenticationApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *AuthenticationSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *AuthenticationStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Authentication constructs an declarative configuration of the Authentication type for use with
+// apply.
+func Authentication(name string) *AuthenticationApplyConfiguration {
+ b := &AuthenticationApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Authentication")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractAuthentication extracts the applied configuration owned by fieldManager from
+// authentication. If no managedFields are found in authentication for fieldManager, a
+// AuthenticationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// authentication must be a unmodified Authentication API object that was retrieved from the Kubernetes API.
+// ExtractAuthentication provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractAuthentication(authentication *apiconfigv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) {
+ return extractAuthentication(authentication, fieldManager, "")
+}
+
+// ExtractAuthenticationStatus is the same as ExtractAuthentication except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractAuthenticationStatus(authentication *apiconfigv1.Authentication, fieldManager string) (*AuthenticationApplyConfiguration, error) {
+ return extractAuthentication(authentication, fieldManager, "status")
+}
+
+func extractAuthentication(authentication *apiconfigv1.Authentication, fieldManager string, subresource string) (*AuthenticationApplyConfiguration, error) {
+ b := &AuthenticationApplyConfiguration{}
+ err := managedfields.ExtractInto(authentication, internal.Parser().Type("com.github.openshift.api.config.v1.Authentication"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(authentication.Name)
+
+ b.WithKind("Authentication")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithKind(value string) *AuthenticationApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithAPIVersion(value string) *AuthenticationApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithName(value string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithGenerateName(value string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithNamespace(value string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithUID(value types.UID) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithResourceVersion(value string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithGeneration(value int64) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *AuthenticationApplyConfiguration) WithLabels(entries map[string]string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *AuthenticationApplyConfiguration) WithAnnotations(entries map[string]string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *AuthenticationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *AuthenticationApplyConfiguration) WithFinalizers(values ...string) *AuthenticationApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *AuthenticationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithSpec(value *AuthenticationSpecApplyConfiguration) *AuthenticationApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatusApplyConfiguration) *AuthenticationApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go
new file mode 100644
index 0000000000..f152d261a8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationspec.go
@@ -0,0 +1,82 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// AuthenticationSpecApplyConfiguration represents an declarative configuration of the AuthenticationSpec type for use
+// with apply.
+type AuthenticationSpecApplyConfiguration struct {
+ Type *v1.AuthenticationType `json:"type,omitempty"`
+ OAuthMetadata *ConfigMapNameReferenceApplyConfiguration `json:"oauthMetadata,omitempty"`
+ WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticators,omitempty"`
+ WebhookTokenAuthenticator *WebhookTokenAuthenticatorApplyConfiguration `json:"webhookTokenAuthenticator,omitempty"`
+ ServiceAccountIssuer *string `json:"serviceAccountIssuer,omitempty"`
+ OIDCProviders []OIDCProviderApplyConfiguration `json:"oidcProviders,omitempty"`
+}
+
+// AuthenticationSpecApplyConfiguration constructs an declarative configuration of the AuthenticationSpec type for use with
+// apply.
+func AuthenticationSpec() *AuthenticationSpecApplyConfiguration {
+ return &AuthenticationSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithType(value v1.AuthenticationType) *AuthenticationSpecApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithOAuthMetadata sets the OAuthMetadata field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OAuthMetadata field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithOAuthMetadata(value *ConfigMapNameReferenceApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+ b.OAuthMetadata = value
+ return b
+}
+
+// WithWebhookTokenAuthenticators adds the given value to the WebhookTokenAuthenticators field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the WebhookTokenAuthenticators field.
+func (b *AuthenticationSpecApplyConfiguration) WithWebhookTokenAuthenticators(values ...*DeprecatedWebhookTokenAuthenticatorApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithWebhookTokenAuthenticators")
+ }
+ b.WebhookTokenAuthenticators = append(b.WebhookTokenAuthenticators, *values[i])
+ }
+ return b
+}
+
+// WithWebhookTokenAuthenticator sets the WebhookTokenAuthenticator field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WebhookTokenAuthenticator field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithWebhookTokenAuthenticator(value *WebhookTokenAuthenticatorApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+ b.WebhookTokenAuthenticator = value
+ return b
+}
+
+// WithServiceAccountIssuer sets the ServiceAccountIssuer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccountIssuer field is set to the value of the last call.
+func (b *AuthenticationSpecApplyConfiguration) WithServiceAccountIssuer(value string) *AuthenticationSpecApplyConfiguration {
+ b.ServiceAccountIssuer = &value
+ return b
+}
+
+// WithOIDCProviders adds the given value to the OIDCProviders field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OIDCProviders field.
+func (b *AuthenticationSpecApplyConfiguration) WithOIDCProviders(values ...*OIDCProviderApplyConfiguration) *AuthenticationSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOIDCProviders")
+ }
+ b.OIDCProviders = append(b.OIDCProviders, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go
new file mode 100644
index 0000000000..e1bb74c0c7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authenticationstatus.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AuthenticationStatusApplyConfiguration represents an declarative configuration of the AuthenticationStatus type for use
+// with apply.
+type AuthenticationStatusApplyConfiguration struct {
+ IntegratedOAuthMetadata *ConfigMapNameReferenceApplyConfiguration `json:"integratedOAuthMetadata,omitempty"`
+ OIDCClients []OIDCClientStatusApplyConfiguration `json:"oidcClients,omitempty"`
+}
+
+// AuthenticationStatusApplyConfiguration constructs an declarative configuration of the AuthenticationStatus type for use with
+// apply.
+func AuthenticationStatus() *AuthenticationStatusApplyConfiguration {
+ return &AuthenticationStatusApplyConfiguration{}
+}
+
+// WithIntegratedOAuthMetadata sets the IntegratedOAuthMetadata field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IntegratedOAuthMetadata field is set to the value of the last call.
+func (b *AuthenticationStatusApplyConfiguration) WithIntegratedOAuthMetadata(value *ConfigMapNameReferenceApplyConfiguration) *AuthenticationStatusApplyConfiguration {
+ b.IntegratedOAuthMetadata = value
+ return b
+}
+
+// WithOIDCClients adds the given value to the OIDCClients field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OIDCClients field.
+func (b *AuthenticationStatusApplyConfiguration) WithOIDCClients(values ...*OIDCClientStatusApplyConfiguration) *AuthenticationStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOIDCClients")
+ }
+ b.OIDCClients = append(b.OIDCClients, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go
new file mode 100644
index 0000000000..4f7ce43d14
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSDNSSpecApplyConfiguration represents an declarative configuration of the AWSDNSSpec type for use
+// with apply.
+type AWSDNSSpecApplyConfiguration struct {
+ PrivateZoneIAMRole *string `json:"privateZoneIAMRole,omitempty"`
+}
+
+// AWSDNSSpecApplyConfiguration constructs an declarative configuration of the AWSDNSSpec type for use with
+// apply.
+func AWSDNSSpec() *AWSDNSSpecApplyConfiguration {
+ return &AWSDNSSpecApplyConfiguration{}
+}
+
+// WithPrivateZoneIAMRole sets the PrivateZoneIAMRole field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrivateZoneIAMRole field is set to the value of the last call.
+func (b *AWSDNSSpecApplyConfiguration) WithPrivateZoneIAMRole(value string) *AWSDNSSpecApplyConfiguration {
+ b.PrivateZoneIAMRole = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go
new file mode 100644
index 0000000000..9a56b68baa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsingressspec.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// AWSIngressSpecApplyConfiguration represents an declarative configuration of the AWSIngressSpec type for use
+// with apply.
+type AWSIngressSpecApplyConfiguration struct {
+ Type *v1.AWSLBType `json:"type,omitempty"`
+}
+
+// AWSIngressSpecApplyConfiguration constructs an declarative configuration of the AWSIngressSpec type for use with
+// apply.
+func AWSIngressSpec() *AWSIngressSpecApplyConfiguration {
+ return &AWSIngressSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *AWSIngressSpecApplyConfiguration) WithType(value v1.AWSLBType) *AWSIngressSpecApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go
new file mode 100644
index 0000000000..b8132541f5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSPlatformSpecApplyConfiguration represents an declarative configuration of the AWSPlatformSpec type for use
+// with apply.
+type AWSPlatformSpecApplyConfiguration struct {
+ ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformSpecApplyConfiguration constructs an declarative configuration of the AWSPlatformSpec type for use with
+// apply.
+func AWSPlatformSpec() *AWSPlatformSpecApplyConfiguration {
+ return &AWSPlatformSpecApplyConfiguration{}
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *AWSPlatformSpecApplyConfiguration) WithServiceEndpoints(values ...*AWSServiceEndpointApplyConfiguration) *AWSPlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithServiceEndpoints")
+ }
+ b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go
new file mode 100644
index 0000000000..fb317ba275
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSPlatformStatusApplyConfiguration represents an declarative configuration of the AWSPlatformStatus type for use
+// with apply.
+type AWSPlatformStatusApplyConfiguration struct {
+ Region *string `json:"region,omitempty"`
+ ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+ ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"`
+}
+
+// AWSPlatformStatusApplyConfiguration constructs an declarative configuration of the AWSPlatformStatus type for use with
+// apply.
+func AWSPlatformStatus() *AWSPlatformStatusApplyConfiguration {
+ return &AWSPlatformStatusApplyConfiguration{}
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *AWSPlatformStatusApplyConfiguration) WithRegion(value string) *AWSPlatformStatusApplyConfiguration {
+ b.Region = &value
+ return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *AWSPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*AWSServiceEndpointApplyConfiguration) *AWSPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithServiceEndpoints")
+ }
+ b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+ }
+ return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AWSPlatformStatusApplyConfiguration) WithResourceTags(values ...*AWSResourceTagApplyConfiguration) *AWSPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceTags")
+ }
+ b.ResourceTags = append(b.ResourceTags, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
new file mode 100644
index 0000000000..f9f174fc5b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsresourcetag.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSResourceTagApplyConfiguration represents an declarative configuration of the AWSResourceTag type for use
+// with apply.
+type AWSResourceTagApplyConfiguration struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// AWSResourceTagApplyConfiguration constructs an declarative configuration of the AWSResourceTag type for use with
+// apply.
+func AWSResourceTag() *AWSResourceTagApplyConfiguration {
+ return &AWSResourceTagApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *AWSResourceTagApplyConfiguration) WithKey(value string) *AWSResourceTagApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *AWSResourceTagApplyConfiguration) WithValue(value string) *AWSResourceTagApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go
new file mode 100644
index 0000000000..169e4bb2a7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsserviceendpoint.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AWSServiceEndpointApplyConfiguration represents an declarative configuration of the AWSServiceEndpoint type for use
+// with apply.
+type AWSServiceEndpointApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// AWSServiceEndpointApplyConfiguration constructs an declarative configuration of the AWSServiceEndpoint type for use with
+// apply.
+func AWSServiceEndpoint() *AWSServiceEndpointApplyConfiguration {
+ return &AWSServiceEndpointApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *AWSServiceEndpointApplyConfiguration) WithName(value string) *AWSServiceEndpointApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *AWSServiceEndpointApplyConfiguration) WithURL(value string) *AWSServiceEndpointApplyConfiguration {
+ b.URL = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go
new file mode 100644
index 0000000000..52b291553c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go
@@ -0,0 +1,68 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// AzurePlatformStatusApplyConfiguration represents an declarative configuration of the AzurePlatformStatus type for use
+// with apply.
+type AzurePlatformStatusApplyConfiguration struct {
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"`
+ CloudName *v1.AzureCloudEnvironment `json:"cloudName,omitempty"`
+ ARMEndpoint *string `json:"armEndpoint,omitempty"`
+ ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"`
+}
+
+// AzurePlatformStatusApplyConfiguration constructs an declarative configuration of the AzurePlatformStatus type for use with
+// apply.
+func AzurePlatformStatus() *AzurePlatformStatusApplyConfiguration {
+ return &AzurePlatformStatusApplyConfiguration{}
+}
+
+// WithResourceGroupName sets the ResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroupName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithResourceGroupName(value string) *AzurePlatformStatusApplyConfiguration {
+ b.ResourceGroupName = &value
+ return b
+}
+
+// WithNetworkResourceGroupName sets the NetworkResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkResourceGroupName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithNetworkResourceGroupName(value string) *AzurePlatformStatusApplyConfiguration {
+ b.NetworkResourceGroupName = &value
+ return b
+}
+
+// WithCloudName sets the CloudName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudName field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithCloudName(value v1.AzureCloudEnvironment) *AzurePlatformStatusApplyConfiguration {
+ b.CloudName = &value
+ return b
+}
+
+// WithARMEndpoint sets the ARMEndpoint field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ARMEndpoint field is set to the value of the last call.
+func (b *AzurePlatformStatusApplyConfiguration) WithARMEndpoint(value string) *AzurePlatformStatusApplyConfiguration {
+ b.ARMEndpoint = &value
+ return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *AzurePlatformStatusApplyConfiguration) WithResourceTags(values ...*AzureResourceTagApplyConfiguration) *AzurePlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceTags")
+ }
+ b.ResourceTags = append(b.ResourceTags, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
new file mode 100644
index 0000000000..f258f09876
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// AzureResourceTagApplyConfiguration represents an declarative configuration of the AzureResourceTag type for use
+// with apply.
+type AzureResourceTagApplyConfiguration struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// AzureResourceTagApplyConfiguration constructs an declarative configuration of the AzureResourceTag type for use with
+// apply.
+func AzureResourceTag() *AzureResourceTagApplyConfiguration {
+ return &AzureResourceTagApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *AzureResourceTagApplyConfiguration) WithKey(value string) *AzureResourceTagApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *AzureResourceTagApplyConfiguration) WithValue(value string) *AzureResourceTagApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go
new file mode 100644
index 0000000000..7ff5dd99e7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// BareMetalPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the BareMetalPlatformLoadBalancer type for use
+// with apply.
+type BareMetalPlatformLoadBalancerApplyConfiguration struct {
+ Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// BareMetalPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the BareMetalPlatformLoadBalancer type for use with
+// apply.
+func BareMetalPlatformLoadBalancer() *BareMetalPlatformLoadBalancerApplyConfiguration {
+ return &BareMetalPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *BareMetalPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *BareMetalPlatformLoadBalancerApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go
new file mode 100644
index 0000000000..d96c5330b5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformspec.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// BareMetalPlatformSpecApplyConfiguration represents an declarative configuration of the BareMetalPlatformSpec type for use
+// with apply.
+type BareMetalPlatformSpecApplyConfiguration struct {
+ APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"`
+ IngressIPs []v1.IP `json:"ingressIPs,omitempty"`
+ MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// BareMetalPlatformSpecApplyConfiguration constructs an declarative configuration of the BareMetalPlatformSpec type for use with
+// apply.
+func BareMetalPlatformSpec() *BareMetalPlatformSpecApplyConfiguration {
+ return &BareMetalPlatformSpecApplyConfiguration{}
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...v1.IP) *BareMetalPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP) *BareMetalPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *BareMetalPlatformSpecApplyConfiguration) WithMachineNetworks(values ...v1.CIDR) *BareMetalPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
new file mode 100644
index 0000000000..87873d49ad
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go
@@ -0,0 +1,87 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// BareMetalPlatformStatusApplyConfiguration represents an declarative configuration of the BareMetalPlatformStatus type for use
+// with apply.
+type BareMetalPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+ IngressIPs []string `json:"ingressIPs,omitempty"`
+ NodeDNSIP *string `json:"nodeDNSIP,omitempty"`
+ LoadBalancer *BareMetalPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+ MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// BareMetalPlatformStatusApplyConfiguration constructs an declarative configuration of the BareMetalPlatformStatus type for use with
+// apply.
+func BareMetalPlatformStatus() *BareMetalPlatformStatusApplyConfiguration {
+ return &BareMetalPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *BareMetalPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithIngressIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *BareMetalPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeDNSIP field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *BareMetalPlatformStatusApplyConfiguration {
+ b.NodeDNSIP = &value
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithLoadBalancer(value *BareMetalPlatformLoadBalancerApplyConfiguration) *BareMetalPlatformStatusApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *BareMetalPlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *BareMetalPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
new file mode 100644
index 0000000000..9d181ebde2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/basicauthidentityprovider.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BasicAuthIdentityProviderApplyConfiguration represents an declarative configuration of the BasicAuthIdentityProvider type for use
+// with apply.
+type BasicAuthIdentityProviderApplyConfiguration struct {
+ OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"`
+}
+
+// BasicAuthIdentityProviderApplyConfiguration constructs an declarative configuration of the BasicAuthIdentityProvider type for use with
+// apply.
+func BasicAuthIdentityProvider() *BasicAuthIdentityProviderApplyConfiguration {
+ return &BasicAuthIdentityProviderApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithURL(value string) *BasicAuthIdentityProviderApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
+
+// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientCert field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+ b.TLSClientCert = value
+ return b
+}
+
+// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientKey field is set to the value of the last call.
+func (b *BasicAuthIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *BasicAuthIdentityProviderApplyConfiguration {
+ b.TLSClientKey = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
new file mode 100644
index 0000000000..39100461aa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go
@@ -0,0 +1,231 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// BuildApplyConfiguration represents an declarative configuration of the Build type for use
+// with apply.
+type BuildApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// Build constructs an declarative configuration of the Build type for use with
+// apply.
+func Build(name string) *BuildApplyConfiguration {
+ b := &BuildApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Build")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractBuild extracts the applied configuration owned by fieldManager from
+// build. If no managedFields are found in build for fieldManager, a
+// BuildApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// build must be a unmodified Build API object that was retrieved from the Kubernetes API.
+// ExtractBuild provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractBuild(build *apiconfigv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
+ return extractBuild(build, fieldManager, "")
+}
+
+// ExtractBuildStatus is the same as ExtractBuild except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractBuildStatus(build *apiconfigv1.Build, fieldManager string) (*BuildApplyConfiguration, error) {
+ return extractBuild(build, fieldManager, "status")
+}
+
+func extractBuild(build *apiconfigv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) {
+ b := &BuildApplyConfiguration{}
+ err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.config.v1.Build"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(build.Name)
+
+ b.WithKind("Build")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *BuildApplyConfiguration {
+ b.Spec = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go
new file mode 100644
index 0000000000..347906b3b0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/builddefaults.go
@@ -0,0 +1,70 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// BuildDefaultsApplyConfiguration represents an declarative configuration of the BuildDefaults type for use
+// with apply.
+type BuildDefaultsApplyConfiguration struct {
+ DefaultProxy *ProxySpecApplyConfiguration `json:"defaultProxy,omitempty"`
+ GitProxy *ProxySpecApplyConfiguration `json:"gitProxy,omitempty"`
+ Env []corev1.EnvVar `json:"env,omitempty"`
+ ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"`
+ Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// BuildDefaultsApplyConfiguration constructs an declarative configuration of the BuildDefaults type for use with
+// apply.
+func BuildDefaults() *BuildDefaultsApplyConfiguration {
+ return &BuildDefaultsApplyConfiguration{}
+}
+
+// WithDefaultProxy sets the DefaultProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DefaultProxy field is set to the value of the last call.
+func (b *BuildDefaultsApplyConfiguration) WithDefaultProxy(value *ProxySpecApplyConfiguration) *BuildDefaultsApplyConfiguration {
+ b.DefaultProxy = value
+ return b
+}
+
+// WithGitProxy sets the GitProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitProxy field is set to the value of the last call.
+func (b *BuildDefaultsApplyConfiguration) WithGitProxy(value *ProxySpecApplyConfiguration) *BuildDefaultsApplyConfiguration {
+ b.GitProxy = value
+ return b
+}
+
+// WithEnv adds the given value to the Env field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Env field.
+func (b *BuildDefaultsApplyConfiguration) WithEnv(values ...corev1.EnvVar) *BuildDefaultsApplyConfiguration {
+ for i := range values {
+ b.Env = append(b.Env, values[i])
+ }
+ return b
+}
+
+// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageLabels field.
+func (b *BuildDefaultsApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildDefaultsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithImageLabels")
+ }
+ b.ImageLabels = append(b.ImageLabels, *values[i])
+ }
+ return b
+}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *BuildDefaultsApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildDefaultsApplyConfiguration {
+ b.Resources = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go
new file mode 100644
index 0000000000..7ce64634ac
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildoverrides.go
@@ -0,0 +1,67 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// BuildOverridesApplyConfiguration represents an declarative configuration of the BuildOverrides type for use
+// with apply.
+type BuildOverridesApplyConfiguration struct {
+ ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"`
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ ForcePull *bool `json:"forcePull,omitempty"`
+}
+
+// BuildOverridesApplyConfiguration constructs an declarative configuration of the BuildOverrides type for use with
+// apply.
+func BuildOverrides() *BuildOverridesApplyConfiguration {
+ return &BuildOverridesApplyConfiguration{}
+}
+
+// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageLabels field.
+func (b *BuildOverridesApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildOverridesApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithImageLabels")
+ }
+ b.ImageLabels = append(b.ImageLabels, *values[i])
+ }
+ return b
+}
+
+// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field,
+// overwriting an existing map entries in NodeSelector field with the same key.
+func (b *BuildOverridesApplyConfiguration) WithNodeSelector(entries map[string]string) *BuildOverridesApplyConfiguration {
+ if b.NodeSelector == nil && len(entries) > 0 {
+ b.NodeSelector = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.NodeSelector[k] = v
+ }
+ return b
+}
+
+// WithTolerations adds the given value to the Tolerations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tolerations field.
+func (b *BuildOverridesApplyConfiguration) WithTolerations(values ...corev1.Toleration) *BuildOverridesApplyConfiguration {
+ for i := range values {
+ b.Tolerations = append(b.Tolerations, values[i])
+ }
+ return b
+}
+
+// WithForcePull sets the ForcePull field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ForcePull field is set to the value of the last call.
+func (b *BuildOverridesApplyConfiguration) WithForcePull(value bool) *BuildOverridesApplyConfiguration {
+ b.ForcePull = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go
new file mode 100644
index 0000000000..521cef0e87
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/buildspec.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildSpecApplyConfiguration represents an declarative configuration of the BuildSpec type for use
+// with apply.
+type BuildSpecApplyConfiguration struct {
+ AdditionalTrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"`
+ BuildDefaults *BuildDefaultsApplyConfiguration `json:"buildDefaults,omitempty"`
+ BuildOverrides *BuildOverridesApplyConfiguration `json:"buildOverrides,omitempty"`
+}
+
+// BuildSpecApplyConfiguration constructs an declarative configuration of the BuildSpec type for use with
+// apply.
+func BuildSpec() *BuildSpecApplyConfiguration {
+ return &BuildSpecApplyConfiguration{}
+}
+
+// WithAdditionalTrustedCA sets the AdditionalTrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdditionalTrustedCA field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithAdditionalTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *BuildSpecApplyConfiguration {
+ b.AdditionalTrustedCA = value
+ return b
+}
+
+// WithBuildDefaults sets the BuildDefaults field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BuildDefaults field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithBuildDefaults(value *BuildDefaultsApplyConfiguration) *BuildSpecApplyConfiguration {
+ b.BuildDefaults = value
+ return b
+}
+
+// WithBuildOverrides sets the BuildOverrides field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BuildOverrides field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithBuildOverrides(value *BuildOverridesApplyConfiguration) *BuildSpecApplyConfiguration {
+ b.BuildOverrides = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go
new file mode 100644
index 0000000000..2d7a55a783
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudcontrollermanagerstatus.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// CloudControllerManagerStatusApplyConfiguration represents an declarative configuration of the CloudControllerManagerStatus type for use
+// with apply.
+type CloudControllerManagerStatusApplyConfiguration struct {
+ State *v1.CloudControllerManagerState `json:"state,omitempty"`
+}
+
+// CloudControllerManagerStatusApplyConfiguration constructs an declarative configuration of the CloudControllerManagerStatus type for use with
+// apply.
+func CloudControllerManagerStatus() *CloudControllerManagerStatusApplyConfiguration {
+ return &CloudControllerManagerStatusApplyConfiguration{}
+}
+
+// WithState sets the State field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the State field is set to the value of the last call.
+func (b *CloudControllerManagerStatusApplyConfiguration) WithState(value v1.CloudControllerManagerState) *CloudControllerManagerStatusApplyConfiguration {
+ b.State = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go
new file mode 100644
index 0000000000..c84f6c7765
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerconfig.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// CloudLoadBalancerConfigApplyConfiguration represents an declarative configuration of the CloudLoadBalancerConfig type for use
+// with apply.
+type CloudLoadBalancerConfigApplyConfiguration struct {
+ DNSType *v1.DNSType `json:"dnsType,omitempty"`
+ ClusterHosted *CloudLoadBalancerIPsApplyConfiguration `json:"clusterHosted,omitempty"`
+}
+
+// CloudLoadBalancerConfigApplyConfiguration constructs an declarative configuration of the CloudLoadBalancerConfig type for use with
+// apply.
+func CloudLoadBalancerConfig() *CloudLoadBalancerConfigApplyConfiguration {
+ return &CloudLoadBalancerConfigApplyConfiguration{}
+}
+
+// WithDNSType sets the DNSType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSType field is set to the value of the last call.
+func (b *CloudLoadBalancerConfigApplyConfiguration) WithDNSType(value v1.DNSType) *CloudLoadBalancerConfigApplyConfiguration {
+ b.DNSType = &value
+ return b
+}
+
+// WithClusterHosted sets the ClusterHosted field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterHosted field is set to the value of the last call.
+func (b *CloudLoadBalancerConfigApplyConfiguration) WithClusterHosted(value *CloudLoadBalancerIPsApplyConfiguration) *CloudLoadBalancerConfigApplyConfiguration {
+ b.ClusterHosted = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go
new file mode 100644
index 0000000000..6480177fe2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/cloudloadbalancerips.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// CloudLoadBalancerIPsApplyConfiguration represents an declarative configuration of the CloudLoadBalancerIPs type for use
+// with apply.
+type CloudLoadBalancerIPsApplyConfiguration struct {
+ APIIntLoadBalancerIPs []v1.IP `json:"apiIntLoadBalancerIPs,omitempty"`
+ APILoadBalancerIPs []v1.IP `json:"apiLoadBalancerIPs,omitempty"`
+ IngressLoadBalancerIPs []v1.IP `json:"ingressLoadBalancerIPs,omitempty"`
+}
+
+// CloudLoadBalancerIPsApplyConfiguration constructs an declarative configuration of the CloudLoadBalancerIPs type for use with
+// apply.
+func CloudLoadBalancerIPs() *CloudLoadBalancerIPsApplyConfiguration {
+ return &CloudLoadBalancerIPsApplyConfiguration{}
+}
+
+// WithAPIIntLoadBalancerIPs adds the given value to the APIIntLoadBalancerIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIIntLoadBalancerIPs field.
+func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPIIntLoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration {
+ for i := range values {
+ b.APIIntLoadBalancerIPs = append(b.APIIntLoadBalancerIPs, values[i])
+ }
+ return b
+}
+
+// WithAPILoadBalancerIPs adds the given value to the APILoadBalancerIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APILoadBalancerIPs field.
+func (b *CloudLoadBalancerIPsApplyConfiguration) WithAPILoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration {
+ for i := range values {
+ b.APILoadBalancerIPs = append(b.APILoadBalancerIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressLoadBalancerIPs adds the given value to the IngressLoadBalancerIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressLoadBalancerIPs field.
+func (b *CloudLoadBalancerIPsApplyConfiguration) WithIngressLoadBalancerIPs(values ...v1.IP) *CloudLoadBalancerIPsApplyConfiguration {
+ for i := range values {
+ b.IngressLoadBalancerIPs = append(b.IngressLoadBalancerIPs, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go
new file mode 100644
index 0000000000..145fa267a4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustercondition.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ClusterConditionApplyConfiguration represents an declarative configuration of the ClusterCondition type for use
+// with apply.
+type ClusterConditionApplyConfiguration struct {
+ Type *string `json:"type,omitempty"`
+ PromQL *PromQLClusterConditionApplyConfiguration `json:"promql,omitempty"`
+}
+
+// ClusterConditionApplyConfiguration constructs an declarative configuration of the ClusterCondition type for use with
+// apply.
+func ClusterCondition() *ClusterConditionApplyConfiguration {
+ return &ClusterConditionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *ClusterConditionApplyConfiguration) WithType(value string) *ClusterConditionApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithPromQL sets the PromQL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PromQL field is set to the value of the last call.
+func (b *ClusterConditionApplyConfiguration) WithPromQL(value *PromQLClusterConditionApplyConfiguration) *ClusterConditionApplyConfiguration {
+ b.PromQL = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go
new file mode 100644
index 0000000000..fe03d3355d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusternetworkentry.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ClusterNetworkEntryApplyConfiguration represents an declarative configuration of the ClusterNetworkEntry type for use
+// with apply.
+type ClusterNetworkEntryApplyConfiguration struct {
+ CIDR *string `json:"cidr,omitempty"`
+ HostPrefix *uint32 `json:"hostPrefix,omitempty"`
+}
+
+// ClusterNetworkEntryApplyConfiguration constructs an declarative configuration of the ClusterNetworkEntry type for use with
+// apply.
+func ClusterNetworkEntry() *ClusterNetworkEntryApplyConfiguration {
+ return &ClusterNetworkEntryApplyConfiguration{}
+}
+
+// WithCIDR sets the CIDR field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CIDR field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithCIDR(value string) *ClusterNetworkEntryApplyConfiguration {
+ b.CIDR = &value
+ return b
+}
+
+// WithHostPrefix sets the HostPrefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostPrefix field is set to the value of the last call.
+func (b *ClusterNetworkEntryApplyConfiguration) WithHostPrefix(value uint32) *ClusterNetworkEntryApplyConfiguration {
+ b.HostPrefix = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
new file mode 100644
index 0000000000..ab83fa08df
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterOperatorApplyConfiguration represents an declarative configuration of the ClusterOperator type for use
+// with apply.
+type ClusterOperatorApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *configv1.ClusterOperatorSpec `json:"spec,omitempty"`
+ Status *ClusterOperatorStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ClusterOperator constructs an declarative configuration of the ClusterOperator type for use with
+// apply.
+func ClusterOperator(name string) *ClusterOperatorApplyConfiguration {
+ b := &ClusterOperatorApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ClusterOperator")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractClusterOperator extracts the applied configuration owned by fieldManager from
+// clusterOperator. If no managedFields are found in clusterOperator for fieldManager, a
+// ClusterOperatorApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// clusterOperator must be a unmodified ClusterOperator API object that was retrieved from the Kubernetes API.
+// ExtractClusterOperator provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldManager string) (*ClusterOperatorApplyConfiguration, error) {
+ return extractClusterOperator(clusterOperator, fieldManager, "")
+}
+
+// ExtractClusterOperatorStatus is the same as ExtractClusterOperator except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterOperatorStatus(clusterOperator *configv1.ClusterOperator, fieldManager string) (*ClusterOperatorApplyConfiguration, error) {
+ return extractClusterOperator(clusterOperator, fieldManager, "status")
+}
+
+func extractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldManager string, subresource string) (*ClusterOperatorApplyConfiguration, error) {
+ b := &ClusterOperatorApplyConfiguration{}
+ err := managedfields.ExtractInto(clusterOperator, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterOperator"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(clusterOperator.Name)
+
+ b.WithKind("ClusterOperator")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithKind(value string) *ClusterOperatorApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithAPIVersion(value string) *ClusterOperatorApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithName(value string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithGenerateName(value string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithNamespace(value string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithUID(value types.UID) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithResourceVersion(value string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithGeneration(value int64) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ClusterOperatorApplyConfiguration) WithLabels(entries map[string]string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ClusterOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterOperatorApplyConfiguration) WithFinalizers(values ...string) *ClusterOperatorApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ClusterOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithSpec(value configv1.ClusterOperatorSpec) *ClusterOperatorApplyConfiguration {
+ b.Spec = &value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ClusterOperatorApplyConfiguration) WithStatus(value *ClusterOperatorStatusApplyConfiguration) *ClusterOperatorApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go
new file mode 100644
index 0000000000..3fac7d9b6e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatus.go
@@ -0,0 +1,69 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// ClusterOperatorStatusApplyConfiguration represents an declarative configuration of the ClusterOperatorStatus type for use
+// with apply.
+type ClusterOperatorStatusApplyConfiguration struct {
+ Conditions []ClusterOperatorStatusConditionApplyConfiguration `json:"conditions,omitempty"`
+ Versions []OperandVersionApplyConfiguration `json:"versions,omitempty"`
+ RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"`
+ Extension *runtime.RawExtension `json:"extension,omitempty"`
+}
+
+// ClusterOperatorStatusApplyConfiguration constructs an declarative configuration of the ClusterOperatorStatus type for use with
+// apply.
+func ClusterOperatorStatus() *ClusterOperatorStatusApplyConfiguration {
+ return &ClusterOperatorStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ClusterOperatorStatusApplyConfiguration) WithConditions(values ...*ClusterOperatorStatusConditionApplyConfiguration) *ClusterOperatorStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithVersions adds the given value to the Versions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Versions field.
+func (b *ClusterOperatorStatusApplyConfiguration) WithVersions(values ...*OperandVersionApplyConfiguration) *ClusterOperatorStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithVersions")
+ }
+ b.Versions = append(b.Versions, *values[i])
+ }
+ return b
+}
+
+// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RelatedObjects field.
+func (b *ClusterOperatorStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *ClusterOperatorStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRelatedObjects")
+ }
+ b.RelatedObjects = append(b.RelatedObjects, *values[i])
+ }
+ return b
+}
+
+// WithExtension sets the Extension field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Extension field is set to the value of the last call.
+func (b *ClusterOperatorStatusApplyConfiguration) WithExtension(value runtime.RawExtension) *ClusterOperatorStatusApplyConfiguration {
+ b.Extension = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go
new file mode 100644
index 0000000000..5e52013c8d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperatorstatuscondition.go
@@ -0,0 +1,64 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterOperatorStatusConditionApplyConfiguration represents an declarative configuration of the ClusterOperatorStatusCondition type for use
+// with apply.
+type ClusterOperatorStatusConditionApplyConfiguration struct {
+ Type *v1.ClusterStatusConditionType `json:"type,omitempty"`
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ Message *string `json:"message,omitempty"`
+}
+
+// ClusterOperatorStatusConditionApplyConfiguration constructs an declarative configuration of the ClusterOperatorStatusCondition type for use with
+// apply.
+func ClusterOperatorStatusCondition() *ClusterOperatorStatusConditionApplyConfiguration {
+ return &ClusterOperatorStatusConditionApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithType(value v1.ClusterStatusConditionType) *ClusterOperatorStatusConditionApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ClusterOperatorStatusConditionApplyConfiguration {
+ b.Status = &value
+ return b
+}
+
+// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastTransitionTime field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ClusterOperatorStatusConditionApplyConfiguration {
+ b.LastTransitionTime = &value
+ return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithReason(value string) *ClusterOperatorStatusConditionApplyConfiguration {
+ b.Reason = &value
+ return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ClusterOperatorStatusConditionApplyConfiguration) WithMessage(value string) *ClusterOperatorStatusConditionApplyConfiguration {
+ b.Message = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
new file mode 100644
index 0000000000..24d5e143c6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterVersionApplyConfiguration represents an declarative configuration of the ClusterVersion type for use
+// with apply.
+type ClusterVersionApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ClusterVersionSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ClusterVersionStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ClusterVersion constructs an declarative configuration of the ClusterVersion type for use with
+// apply.
+func ClusterVersion(name string) *ClusterVersionApplyConfiguration {
+ b := &ClusterVersionApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ClusterVersion")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractClusterVersion extracts the applied configuration owned by fieldManager from
+// clusterVersion. If no managedFields are found in clusterVersion for fieldManager, a
+// ClusterVersionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterVersion must be an unmodified ClusterVersion API object that was retrieved from the Kubernetes API.
+// ExtractClusterVersion provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterVersion(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) {
+ return extractClusterVersion(clusterVersion, fieldManager, "")
+}
+
+// ExtractClusterVersionStatus is the same as ExtractClusterVersion except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterVersionStatus(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string) (*ClusterVersionApplyConfiguration, error) {
+ return extractClusterVersion(clusterVersion, fieldManager, "status")
+}
+
+func extractClusterVersion(clusterVersion *apiconfigv1.ClusterVersion, fieldManager string, subresource string) (*ClusterVersionApplyConfiguration, error) {
+ b := &ClusterVersionApplyConfiguration{}
+ err := managedfields.ExtractInto(clusterVersion, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterVersion"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(clusterVersion.Name)
+
+ b.WithKind("ClusterVersion")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithKind(value string) *ClusterVersionApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithAPIVersion(value string) *ClusterVersionApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithName(value string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithGenerateName(value string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithNamespace(value string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithUID(value types.UID) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithResourceVersion(value string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithGeneration(value int64) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ClusterVersionApplyConfiguration) WithLabels(entries map[string]string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ClusterVersionApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterVersionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterVersionApplyConfiguration) WithFinalizers(values ...string) *ClusterVersionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ClusterVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithSpec(value *ClusterVersionSpecApplyConfiguration) *ClusterVersionApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ClusterVersionApplyConfiguration) WithStatus(value *ClusterVersionStatusApplyConfiguration) *ClusterVersionApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go
new file mode 100644
index 0000000000..b4a28f1b24
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesspec.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ClusterVersionCapabilitiesSpecApplyConfiguration represents an declarative configuration of the ClusterVersionCapabilitiesSpec type for use
+// with apply.
+type ClusterVersionCapabilitiesSpecApplyConfiguration struct {
+ BaselineCapabilitySet *v1.ClusterVersionCapabilitySet `json:"baselineCapabilitySet,omitempty"`
+ AdditionalEnabledCapabilities []v1.ClusterVersionCapability `json:"additionalEnabledCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesSpecApplyConfiguration constructs an declarative configuration of the ClusterVersionCapabilitiesSpec type for use with
+// apply.
+func ClusterVersionCapabilitiesSpec() *ClusterVersionCapabilitiesSpecApplyConfiguration {
+ return &ClusterVersionCapabilitiesSpecApplyConfiguration{}
+}
+
+// WithBaselineCapabilitySet sets the BaselineCapabilitySet field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BaselineCapabilitySet field is set to the value of the last call.
+func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithBaselineCapabilitySet(value v1.ClusterVersionCapabilitySet) *ClusterVersionCapabilitiesSpecApplyConfiguration {
+ b.BaselineCapabilitySet = &value
+ return b
+}
+
+// WithAdditionalEnabledCapabilities adds the given value to the AdditionalEnabledCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AdditionalEnabledCapabilities field.
+func (b *ClusterVersionCapabilitiesSpecApplyConfiguration) WithAdditionalEnabledCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesSpecApplyConfiguration {
+ for i := range values {
+ b.AdditionalEnabledCapabilities = append(b.AdditionalEnabledCapabilities, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
new file mode 100644
index 0000000000..48c4363f11
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversioncapabilitiesstatus.go
@@ -0,0 +1,40 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ClusterVersionCapabilitiesStatusApplyConfiguration represents an declarative configuration of the ClusterVersionCapabilitiesStatus type for use
+// with apply.
+type ClusterVersionCapabilitiesStatusApplyConfiguration struct {
+ EnabledCapabilities []v1.ClusterVersionCapability `json:"enabledCapabilities,omitempty"`
+ KnownCapabilities []v1.ClusterVersionCapability `json:"knownCapabilities,omitempty"`
+}
+
+// ClusterVersionCapabilitiesStatusApplyConfiguration constructs an declarative configuration of the ClusterVersionCapabilitiesStatus type for use with
+// apply.
+func ClusterVersionCapabilitiesStatus() *ClusterVersionCapabilitiesStatusApplyConfiguration {
+ return &ClusterVersionCapabilitiesStatusApplyConfiguration{}
+}
+
+// WithEnabledCapabilities adds the given value to the EnabledCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EnabledCapabilities field.
+func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithEnabledCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration {
+ for i := range values {
+ b.EnabledCapabilities = append(b.EnabledCapabilities, values[i])
+ }
+ return b
+}
+
+// WithKnownCapabilities adds the given value to the KnownCapabilities field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the KnownCapabilities field.
+func (b *ClusterVersionCapabilitiesStatusApplyConfiguration) WithKnownCapabilities(values ...v1.ClusterVersionCapability) *ClusterVersionCapabilitiesStatusApplyConfiguration {
+ for i := range values {
+ b.KnownCapabilities = append(b.KnownCapabilities, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go
new file mode 100644
index 0000000000..e1fd4d37d0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionspec.go
@@ -0,0 +1,91 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ClusterVersionSpecApplyConfiguration represents an declarative configuration of the ClusterVersionSpec type for use
+// with apply.
+type ClusterVersionSpecApplyConfiguration struct {
+ ClusterID *v1.ClusterID `json:"clusterID,omitempty"`
+ DesiredUpdate *UpdateApplyConfiguration `json:"desiredUpdate,omitempty"`
+ Upstream *v1.URL `json:"upstream,omitempty"`
+ Channel *string `json:"channel,omitempty"`
+ Capabilities *ClusterVersionCapabilitiesSpecApplyConfiguration `json:"capabilities,omitempty"`
+ SignatureStores []SignatureStoreApplyConfiguration `json:"signatureStores,omitempty"`
+ Overrides []ComponentOverrideApplyConfiguration `json:"overrides,omitempty"`
+}
+
+// ClusterVersionSpecApplyConfiguration constructs an declarative configuration of the ClusterVersionSpec type for use with
+// apply.
+func ClusterVersionSpec() *ClusterVersionSpecApplyConfiguration {
+ return &ClusterVersionSpecApplyConfiguration{}
+}
+
+// WithClusterID sets the ClusterID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterID field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithClusterID(value v1.ClusterID) *ClusterVersionSpecApplyConfiguration {
+ b.ClusterID = &value
+ return b
+}
+
+// WithDesiredUpdate sets the DesiredUpdate field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DesiredUpdate field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithDesiredUpdate(value *UpdateApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+ b.DesiredUpdate = value
+ return b
+}
+
+// WithUpstream sets the Upstream field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Upstream field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithUpstream(value v1.URL) *ClusterVersionSpecApplyConfiguration {
+ b.Upstream = &value
+ return b
+}
+
+// WithChannel sets the Channel field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Channel field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithChannel(value string) *ClusterVersionSpecApplyConfiguration {
+ b.Channel = &value
+ return b
+}
+
+// WithCapabilities sets the Capabilities field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Capabilities field is set to the value of the last call.
+func (b *ClusterVersionSpecApplyConfiguration) WithCapabilities(value *ClusterVersionCapabilitiesSpecApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+ b.Capabilities = value
+ return b
+}
+
+// WithSignatureStores adds the given value to the SignatureStores field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the SignatureStores field.
+func (b *ClusterVersionSpecApplyConfiguration) WithSignatureStores(values ...*SignatureStoreApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSignatureStores")
+ }
+ b.SignatureStores = append(b.SignatureStores, *values[i])
+ }
+ return b
+}
+
+// WithOverrides adds the given value to the Overrides field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Overrides field.
+func (b *ClusterVersionSpecApplyConfiguration) WithOverrides(values ...*ComponentOverrideApplyConfiguration) *ClusterVersionSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOverrides")
+ }
+ b.Overrides = append(b.Overrides, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
new file mode 100644
index 0000000000..3e9f450949
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversionstatus.go
@@ -0,0 +1,106 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ClusterVersionStatusApplyConfiguration represents an declarative configuration of the ClusterVersionStatus type for use
+// with apply.
+type ClusterVersionStatusApplyConfiguration struct {
+ Desired *ReleaseApplyConfiguration `json:"desired,omitempty"`
+ History []UpdateHistoryApplyConfiguration `json:"history,omitempty"`
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ VersionHash *string `json:"versionHash,omitempty"`
+ Capabilities *ClusterVersionCapabilitiesStatusApplyConfiguration `json:"capabilities,omitempty"`
+ Conditions []ClusterOperatorStatusConditionApplyConfiguration `json:"conditions,omitempty"`
+ AvailableUpdates []ReleaseApplyConfiguration `json:"availableUpdates,omitempty"`
+ ConditionalUpdates []ConditionalUpdateApplyConfiguration `json:"conditionalUpdates,omitempty"`
+}
+
+// ClusterVersionStatusApplyConfiguration constructs an declarative configuration of the ClusterVersionStatus type for use with
+// apply.
+func ClusterVersionStatus() *ClusterVersionStatusApplyConfiguration {
+ return &ClusterVersionStatusApplyConfiguration{}
+}
+
+// WithDesired sets the Desired field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Desired field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithDesired(value *ReleaseApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ b.Desired = value
+ return b
+}
+
+// WithHistory adds the given value to the History field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the History field.
+func (b *ClusterVersionStatusApplyConfiguration) WithHistory(values ...*UpdateHistoryApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithHistory")
+ }
+ b.History = append(b.History, *values[i])
+ }
+ return b
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterVersionStatusApplyConfiguration {
+ b.ObservedGeneration = &value
+ return b
+}
+
+// WithVersionHash sets the VersionHash field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VersionHash field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithVersionHash(value string) *ClusterVersionStatusApplyConfiguration {
+ b.VersionHash = &value
+ return b
+}
+
+// WithCapabilities sets the Capabilities field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Capabilities field is set to the value of the last call.
+func (b *ClusterVersionStatusApplyConfiguration) WithCapabilities(value *ClusterVersionCapabilitiesStatusApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ b.Capabilities = value
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ClusterVersionStatusApplyConfiguration) WithConditions(values ...*ClusterOperatorStatusConditionApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithAvailableUpdates adds the given value to the AvailableUpdates field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AvailableUpdates field.
+func (b *ClusterVersionStatusApplyConfiguration) WithAvailableUpdates(values ...*ReleaseApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithAvailableUpdates")
+ }
+ b.AvailableUpdates = append(b.AvailableUpdates, *values[i])
+ }
+ return b
+}
+
+// WithConditionalUpdates adds the given value to the ConditionalUpdates field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConditionalUpdates field.
+func (b *ClusterVersionStatusApplyConfiguration) WithConditionalUpdates(values ...*ConditionalUpdateApplyConfiguration) *ClusterVersionStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditionalUpdates")
+ }
+ b.ConditionalUpdates = append(b.ConditionalUpdates, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
new file mode 100644
index 0000000000..8467acef42
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentoverride.go
@@ -0,0 +1,59 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ComponentOverrideApplyConfiguration represents an declarative configuration of the ComponentOverride type for use
+// with apply.
+type ComponentOverrideApplyConfiguration struct {
+ Kind *string `json:"kind,omitempty"`
+ Group *string `json:"group,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Unmanaged *bool `json:"unmanaged,omitempty"`
+}
+
+// ComponentOverrideApplyConfiguration constructs an declarative configuration of the ComponentOverride type for use with
+// apply.
+func ComponentOverride() *ComponentOverrideApplyConfiguration {
+ return &ComponentOverrideApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithKind(value string) *ComponentOverrideApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithGroup sets the Group field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Group field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithGroup(value string) *ComponentOverrideApplyConfiguration {
+ b.Group = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithNamespace(value string) *ComponentOverrideApplyConfiguration {
+ b.Namespace = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithName(value string) *ComponentOverrideApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithUnmanaged sets the Unmanaged field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Unmanaged field is set to the value of the last call.
+func (b *ComponentOverrideApplyConfiguration) WithUnmanaged(value bool) *ComponentOverrideApplyConfiguration {
+ b.Unmanaged = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go
new file mode 100644
index 0000000000..8e5973d91a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutespec.go
@@ -0,0 +1,54 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ComponentRouteSpecApplyConfiguration represents an declarative configuration of the ComponentRouteSpec type for use
+// with apply.
+type ComponentRouteSpecApplyConfiguration struct {
+ Namespace *string `json:"namespace,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Hostname *v1.Hostname `json:"hostname,omitempty"`
+ ServingCertKeyPairSecret *SecretNameReferenceApplyConfiguration `json:"servingCertKeyPairSecret,omitempty"`
+}
+
+// ComponentRouteSpecApplyConfiguration constructs an declarative configuration of the ComponentRouteSpec type for use with
+// apply.
+func ComponentRouteSpec() *ComponentRouteSpecApplyConfiguration {
+ return &ComponentRouteSpecApplyConfiguration{}
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ComponentRouteSpecApplyConfiguration) WithNamespace(value string) *ComponentRouteSpecApplyConfiguration {
+ b.Namespace = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ComponentRouteSpecApplyConfiguration) WithName(value string) *ComponentRouteSpecApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithHostname sets the Hostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Hostname field is set to the value of the last call.
+func (b *ComponentRouteSpecApplyConfiguration) WithHostname(value v1.Hostname) *ComponentRouteSpecApplyConfiguration {
+ b.Hostname = &value
+ return b
+}
+
+// WithServingCertKeyPairSecret sets the ServingCertKeyPairSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServingCertKeyPairSecret field is set to the value of the last call.
+func (b *ComponentRouteSpecApplyConfiguration) WithServingCertKeyPairSecret(value *SecretNameReferenceApplyConfiguration) *ComponentRouteSpecApplyConfiguration {
+ b.ServingCertKeyPairSecret = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go
new file mode 100644
index 0000000000..a26e24bd8f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/componentroutestatus.go
@@ -0,0 +1,96 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ComponentRouteStatusApplyConfiguration represents an declarative configuration of the ComponentRouteStatus type for use
+// with apply.
+type ComponentRouteStatusApplyConfiguration struct {
+ Namespace *string `json:"namespace,omitempty"`
+ Name *string `json:"name,omitempty"`
+ DefaultHostname *v1.Hostname `json:"defaultHostname,omitempty"`
+ ConsumingUsers []v1.ConsumingUser `json:"consumingUsers,omitempty"`
+ CurrentHostnames []v1.Hostname `json:"currentHostnames,omitempty"`
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"`
+}
+
+// ComponentRouteStatusApplyConfiguration constructs an declarative configuration of the ComponentRouteStatus type for use with
+// apply.
+func ComponentRouteStatus() *ComponentRouteStatusApplyConfiguration {
+ return &ComponentRouteStatusApplyConfiguration{}
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ComponentRouteStatusApplyConfiguration) WithNamespace(value string) *ComponentRouteStatusApplyConfiguration {
+ b.Namespace = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ComponentRouteStatusApplyConfiguration) WithName(value string) *ComponentRouteStatusApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithDefaultHostname sets the DefaultHostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DefaultHostname field is set to the value of the last call.
+func (b *ComponentRouteStatusApplyConfiguration) WithDefaultHostname(value v1.Hostname) *ComponentRouteStatusApplyConfiguration {
+ b.DefaultHostname = &value
+ return b
+}
+
+// WithConsumingUsers adds the given value to the ConsumingUsers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConsumingUsers field.
+func (b *ComponentRouteStatusApplyConfiguration) WithConsumingUsers(values ...v1.ConsumingUser) *ComponentRouteStatusApplyConfiguration {
+ for i := range values {
+ b.ConsumingUsers = append(b.ConsumingUsers, values[i])
+ }
+ return b
+}
+
+// WithCurrentHostnames adds the given value to the CurrentHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the CurrentHostnames field.
+func (b *ComponentRouteStatusApplyConfiguration) WithCurrentHostnames(values ...v1.Hostname) *ComponentRouteStatusApplyConfiguration {
+ for i := range values {
+ b.CurrentHostnames = append(b.CurrentHostnames, values[i])
+ }
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ComponentRouteStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ComponentRouteStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RelatedObjects field.
+func (b *ComponentRouteStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *ComponentRouteStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRelatedObjects")
+ }
+ b.RelatedObjects = append(b.RelatedObjects, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
new file mode 100644
index 0000000000..e6b9c27f24
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdate.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ConditionalUpdateApplyConfiguration represents an declarative configuration of the ConditionalUpdate type for use
+// with apply.
+type ConditionalUpdateApplyConfiguration struct {
+ Release *ReleaseApplyConfiguration `json:"release,omitempty"`
+ Risks []ConditionalUpdateRiskApplyConfiguration `json:"risks,omitempty"`
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// ConditionalUpdateApplyConfiguration constructs an declarative configuration of the ConditionalUpdate type for use with
+// apply.
+func ConditionalUpdate() *ConditionalUpdateApplyConfiguration {
+ return &ConditionalUpdateApplyConfiguration{}
+}
+
+// WithRelease sets the Release field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Release field is set to the value of the last call.
+func (b *ConditionalUpdateApplyConfiguration) WithRelease(value *ReleaseApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+ b.Release = value
+ return b
+}
+
+// WithRisks adds the given value to the Risks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Risks field.
+func (b *ConditionalUpdateApplyConfiguration) WithRisks(values ...*ConditionalUpdateRiskApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRisks")
+ }
+ b.Risks = append(b.Risks, *values[i])
+ }
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ConditionalUpdateApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ConditionalUpdateApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
new file mode 100644
index 0000000000..49ff03f59f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/conditionalupdaterisk.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConditionalUpdateRiskApplyConfiguration represents an declarative configuration of the ConditionalUpdateRisk type for use
+// with apply.
+type ConditionalUpdateRiskApplyConfiguration struct {
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Message *string `json:"message,omitempty"`
+ MatchingRules []ClusterConditionApplyConfiguration `json:"matchingRules,omitempty"`
+}
+
+// ConditionalUpdateRiskApplyConfiguration constructs an declarative configuration of the ConditionalUpdateRisk type for use with
+// apply.
+func ConditionalUpdateRisk() *ConditionalUpdateRiskApplyConfiguration {
+ return &ConditionalUpdateRiskApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithURL(value string) *ConditionalUpdateRiskApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithName(value string) *ConditionalUpdateRiskApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithMessage(value string) *ConditionalUpdateRiskApplyConfiguration {
+ b.Message = &value
+ return b
+}
+
+// WithMatchingRules adds the given value to the MatchingRules field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchingRules field.
+func (b *ConditionalUpdateRiskApplyConfiguration) WithMatchingRules(values ...*ClusterConditionApplyConfiguration) *ConditionalUpdateRiskApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithMatchingRules")
+ }
+ b.MatchingRules = append(b.MatchingRules, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
new file mode 100644
index 0000000000..4f03bf8b1c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapfilereference.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConfigMapFileReferenceApplyConfiguration represents an declarative configuration of the ConfigMapFileReference type for use
+// with apply.
+type ConfigMapFileReferenceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Key *string `json:"key,omitempty"`
+}
+
+// ConfigMapFileReferenceApplyConfiguration constructs an declarative configuration of the ConfigMapFileReference type for use with
+// apply.
+func ConfigMapFileReference() *ConfigMapFileReferenceApplyConfiguration {
+ return &ConfigMapFileReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConfigMapFileReferenceApplyConfiguration) WithName(value string) *ConfigMapFileReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *ConfigMapFileReferenceApplyConfiguration) WithKey(value string) *ConfigMapFileReferenceApplyConfiguration {
+ b.Key = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go
new file mode 100644
index 0000000000..b85607ef46
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/configmapnamereference.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConfigMapNameReferenceApplyConfiguration represents an declarative configuration of the ConfigMapNameReference type for use
+// with apply.
+type ConfigMapNameReferenceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+}
+
+// ConfigMapNameReferenceApplyConfiguration constructs an declarative configuration of the ConfigMapNameReference type for use with
+// apply.
+func ConfigMapNameReference() *ConfigMapNameReferenceApplyConfiguration {
+ return &ConfigMapNameReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConfigMapNameReferenceApplyConfiguration) WithName(value string) *ConfigMapNameReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go
new file mode 100644
index 0000000000..64188c220c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ConsoleApplyConfiguration represents an declarative configuration of the Console type for use
+// with apply.
+type ConsoleApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ConsoleSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ConsoleStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Console constructs an declarative configuration of the Console type for use with
+// apply.
+func Console(name string) *ConsoleApplyConfiguration {
+ b := &ConsoleApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Console")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractConsole extracts the applied configuration owned by fieldManager from
+// console. If no managedFields are found in console for fieldManager, a
+// ConsoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// console must be a unmodified Console API object that was retrieved from the Kubernetes API.
+// ExtractConsole provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractConsole(console *apiconfigv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) {
+ return extractConsole(console, fieldManager, "")
+}
+
+// ExtractConsoleStatus is the same as ExtractConsole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractConsoleStatus(console *apiconfigv1.Console, fieldManager string) (*ConsoleApplyConfiguration, error) {
+ return extractConsole(console, fieldManager, "status")
+}
+
+func extractConsole(console *apiconfigv1.Console, fieldManager string, subresource string) (*ConsoleApplyConfiguration, error) {
+ b := &ConsoleApplyConfiguration{}
+ err := managedfields.ExtractInto(console, internal.Parser().Type("com.github.openshift.api.config.v1.Console"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(console.Name)
+
+ b.WithKind("Console")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithKind(value string) *ConsoleApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithAPIVersion(value string) *ConsoleApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithName(value string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithGenerateName(value string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithNamespace(value string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithUID(value types.UID) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithResourceVersion(value string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithGeneration(value int64) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ConsoleApplyConfiguration) WithLabels(entries map[string]string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ConsoleApplyConfiguration) WithAnnotations(entries map[string]string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ConsoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ConsoleApplyConfiguration) WithFinalizers(values ...string) *ConsoleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ConsoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithSpec(value *ConsoleSpecApplyConfiguration) *ConsoleApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfiguration) *ConsoleApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go
new file mode 100644
index 0000000000..5c352fb026
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consoleauthentication.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConsoleAuthenticationApplyConfiguration represents an declarative configuration of the ConsoleAuthentication type for use
+// with apply.
+type ConsoleAuthenticationApplyConfiguration struct {
+ LogoutRedirect *string `json:"logoutRedirect,omitempty"`
+}
+
+// ConsoleAuthenticationApplyConfiguration constructs an declarative configuration of the ConsoleAuthentication type for use with
+// apply.
+func ConsoleAuthentication() *ConsoleAuthenticationApplyConfiguration {
+ return &ConsoleAuthenticationApplyConfiguration{}
+}
+
+// WithLogoutRedirect sets the LogoutRedirect field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LogoutRedirect field is set to the value of the last call.
+func (b *ConsoleAuthenticationApplyConfiguration) WithLogoutRedirect(value string) *ConsoleAuthenticationApplyConfiguration {
+ b.LogoutRedirect = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go
new file mode 100644
index 0000000000..ba76971068
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolespec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConsoleSpecApplyConfiguration represents an declarative configuration of the ConsoleSpec type for use
+// with apply.
+type ConsoleSpecApplyConfiguration struct {
+ Authentication *ConsoleAuthenticationApplyConfiguration `json:"authentication,omitempty"`
+}
+
+// ConsoleSpecApplyConfiguration constructs an declarative configuration of the ConsoleSpec type for use with
+// apply.
+func ConsoleSpec() *ConsoleSpecApplyConfiguration {
+ return &ConsoleSpecApplyConfiguration{}
+}
+
+// WithAuthentication sets the Authentication field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Authentication field is set to the value of the last call.
+func (b *ConsoleSpecApplyConfiguration) WithAuthentication(value *ConsoleAuthenticationApplyConfiguration) *ConsoleSpecApplyConfiguration {
+ b.Authentication = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go
new file mode 100644
index 0000000000..33e04b37a3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/consolestatus.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ConsoleStatusApplyConfiguration represents an declarative configuration of the ConsoleStatus type for use
+// with apply.
+type ConsoleStatusApplyConfiguration struct {
+ ConsoleURL *string `json:"consoleURL,omitempty"`
+}
+
+// ConsoleStatusApplyConfiguration constructs an declarative configuration of the ConsoleStatus type for use with
+// apply.
+func ConsoleStatus() *ConsoleStatusApplyConfiguration {
+ return &ConsoleStatusApplyConfiguration{}
+}
+
+// WithConsoleURL sets the ConsoleURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ConsoleURL field is set to the value of the last call.
+func (b *ConsoleStatusApplyConfiguration) WithConsoleURL(value string) *ConsoleStatusApplyConfiguration {
+ b.ConsoleURL = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go
new file mode 100644
index 0000000000..0ce419b288
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go
@@ -0,0 +1,40 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// CustomFeatureGatesApplyConfiguration represents an declarative configuration of the CustomFeatureGates type for use
+// with apply.
+type CustomFeatureGatesApplyConfiguration struct {
+ Enabled []v1.FeatureGateName `json:"enabled,omitempty"`
+ Disabled []v1.FeatureGateName `json:"disabled,omitempty"`
+}
+
+// CustomFeatureGatesApplyConfiguration constructs an declarative configuration of the CustomFeatureGates type for use with
+// apply.
+func CustomFeatureGates() *CustomFeatureGatesApplyConfiguration {
+ return &CustomFeatureGatesApplyConfiguration{}
+}
+
+// WithEnabled adds the given value to the Enabled field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Enabled field.
+func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration {
+ for i := range values {
+ b.Enabled = append(b.Enabled, values[i])
+ }
+ return b
+}
+
+// WithDisabled adds the given value to the Disabled field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Disabled field.
+func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration {
+ for i := range values {
+ b.Disabled = append(b.Disabled, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
new file mode 100644
index 0000000000..cea54d882a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customtlsprofile.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// CustomTLSProfileApplyConfiguration represents an declarative configuration of the CustomTLSProfile type for use
+// with apply.
+type CustomTLSProfileApplyConfiguration struct {
+ TLSProfileSpecApplyConfiguration `json:",inline"`
+}
+
+// CustomTLSProfileApplyConfiguration constructs an declarative configuration of the CustomTLSProfile type for use with
+// apply.
+func CustomTLSProfile() *CustomTLSProfileApplyConfiguration {
+ return &CustomTLSProfileApplyConfiguration{}
+}
+
+// WithCiphers adds the given value to the Ciphers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ciphers field.
+func (b *CustomTLSProfileApplyConfiguration) WithCiphers(values ...string) *CustomTLSProfileApplyConfiguration {
+ for i := range values {
+ b.Ciphers = append(b.Ciphers, values[i])
+ }
+ return b
+}
+
+// WithMinTLSVersion sets the MinTLSVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinTLSVersion field is set to the value of the last call.
+func (b *CustomTLSProfileApplyConfiguration) WithMinTLSVersion(value configv1.TLSProtocolVersion) *CustomTLSProfileApplyConfiguration {
+ b.MinTLSVersion = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go
new file mode 100644
index 0000000000..bb312e756a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/deprecatedwebhooktokenauthenticator.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// DeprecatedWebhookTokenAuthenticatorApplyConfiguration represents an declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use
+// with apply.
+type DeprecatedWebhookTokenAuthenticatorApplyConfiguration struct {
+ KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"`
+}
+
+// DeprecatedWebhookTokenAuthenticatorApplyConfiguration constructs an declarative configuration of the DeprecatedWebhookTokenAuthenticator type for use with
+// apply.
+func DeprecatedWebhookTokenAuthenticator() *DeprecatedWebhookTokenAuthenticatorApplyConfiguration {
+ return &DeprecatedWebhookTokenAuthenticatorApplyConfiguration{}
+}
+
+// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the KubeConfig field is set to the value of the last call.
+func (b *DeprecatedWebhookTokenAuthenticatorApplyConfiguration) WithKubeConfig(value *SecretNameReferenceApplyConfiguration) *DeprecatedWebhookTokenAuthenticatorApplyConfiguration {
+ b.KubeConfig = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go
new file mode 100644
index 0000000000..2567ddf02a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DNSApplyConfiguration represents an declarative configuration of the DNS type for use
+// with apply.
+type DNSApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *DNSSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.DNSStatus `json:"status,omitempty"`
+}
+
+// DNS constructs an declarative configuration of the DNS type for use with
+// apply.
+func DNS(name string) *DNSApplyConfiguration {
+ b := &DNSApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("DNS")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractDNS extracts the applied configuration owned by fieldManager from
+// dNS. If no managedFields are found in dNS for fieldManager, a
+// DNSApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// dNS must be a unmodified DNS API object that was retrieved from the Kubernetes API.
+// ExtractDNS provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractDNS(dNS *apiconfigv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) {
+ return extractDNS(dNS, fieldManager, "")
+}
+
+// ExtractDNSStatus is the same as ExtractDNS except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDNSStatus(dNS *apiconfigv1.DNS, fieldManager string) (*DNSApplyConfiguration, error) {
+ return extractDNS(dNS, fieldManager, "status")
+}
+
+func extractDNS(dNS *apiconfigv1.DNS, fieldManager string, subresource string) (*DNSApplyConfiguration, error) {
+ b := &DNSApplyConfiguration{}
+ err := managedfields.ExtractInto(dNS, internal.Parser().Type("com.github.openshift.api.config.v1.DNS"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(dNS.Name)
+
+ b.WithKind("DNS")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithKind(value string) *DNSApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithAPIVersion(value string) *DNSApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithName(value string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithGenerateName(value string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithNamespace(value string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithUID(value types.UID) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithResourceVersion(value string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithGeneration(value int64) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *DNSApplyConfiguration) WithLabels(entries map[string]string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *DNSApplyConfiguration) WithAnnotations(entries map[string]string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *DNSApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *DNSApplyConfiguration) WithFinalizers(values ...string) *DNSApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *DNSApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithSpec(value *DNSSpecApplyConfiguration) *DNSApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *DNSApplyConfiguration) WithStatus(value apiconfigv1.DNSStatus) *DNSApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go
new file mode 100644
index 0000000000..8f43c8c5f8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// DNSPlatformSpecApplyConfiguration represents an declarative configuration of the DNSPlatformSpec type for use
+// with apply.
+type DNSPlatformSpecApplyConfiguration struct {
+ Type *v1.PlatformType `json:"type,omitempty"`
+ AWS *AWSDNSSpecApplyConfiguration `json:"aws,omitempty"`
+}
+
+// DNSPlatformSpecApplyConfiguration constructs an declarative configuration of the DNSPlatformSpec type for use with
+// apply.
+func DNSPlatformSpec() *DNSPlatformSpecApplyConfiguration {
+ return &DNSPlatformSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *DNSPlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *DNSPlatformSpecApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithAWS sets the AWS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AWS field is set to the value of the last call.
+func (b *DNSPlatformSpecApplyConfiguration) WithAWS(value *AWSDNSSpecApplyConfiguration) *DNSPlatformSpecApplyConfiguration {
+ b.AWS = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go
new file mode 100644
index 0000000000..b534ef9435
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// DNSSpecApplyConfiguration represents an declarative configuration of the DNSSpec type for use
+// with apply.
+type DNSSpecApplyConfiguration struct {
+ BaseDomain *string `json:"baseDomain,omitempty"`
+ PublicZone *DNSZoneApplyConfiguration `json:"publicZone,omitempty"`
+ PrivateZone *DNSZoneApplyConfiguration `json:"privateZone,omitempty"`
+ Platform *DNSPlatformSpecApplyConfiguration `json:"platform,omitempty"`
+}
+
+// DNSSpecApplyConfiguration constructs an declarative configuration of the DNSSpec type for use with
+// apply.
+func DNSSpec() *DNSSpecApplyConfiguration {
+ return &DNSSpecApplyConfiguration{}
+}
+
+// WithBaseDomain sets the BaseDomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BaseDomain field is set to the value of the last call.
+func (b *DNSSpecApplyConfiguration) WithBaseDomain(value string) *DNSSpecApplyConfiguration {
+ b.BaseDomain = &value
+ return b
+}
+
+// WithPublicZone sets the PublicZone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PublicZone field is set to the value of the last call.
+func (b *DNSSpecApplyConfiguration) WithPublicZone(value *DNSZoneApplyConfiguration) *DNSSpecApplyConfiguration {
+ b.PublicZone = value
+ return b
+}
+
+// WithPrivateZone sets the PrivateZone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrivateZone field is set to the value of the last call.
+func (b *DNSSpecApplyConfiguration) WithPrivateZone(value *DNSZoneApplyConfiguration) *DNSSpecApplyConfiguration {
+ b.PrivateZone = value
+ return b
+}
+
+// WithPlatform sets the Platform field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Platform field is set to the value of the last call.
+func (b *DNSSpecApplyConfiguration) WithPlatform(value *DNSPlatformSpecApplyConfiguration) *DNSSpecApplyConfiguration {
+ b.Platform = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go
new file mode 100644
index 0000000000..63b8d1fcc2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnszone.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// DNSZoneApplyConfiguration represents an declarative configuration of the DNSZone type for use
+// with apply.
+type DNSZoneApplyConfiguration struct {
+ ID *string `json:"id,omitempty"`
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+// DNSZoneApplyConfiguration constructs an declarative configuration of the DNSZone type for use with
+// apply.
+func DNSZone() *DNSZoneApplyConfiguration {
+ return &DNSZoneApplyConfiguration{}
+}
+
+// WithID sets the ID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ID field is set to the value of the last call.
+func (b *DNSZoneApplyConfiguration) WithID(value string) *DNSZoneApplyConfiguration {
+ b.ID = &value
+ return b
+}
+
+// WithTags puts the entries into the Tags field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Tags field,
+// overwriting an existing map entries in Tags field with the same key.
+func (b *DNSZoneApplyConfiguration) WithTags(entries map[string]string) *DNSZoneApplyConfiguration {
+ if b.Tags == nil && len(entries) > 0 {
+ b.Tags = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Tags[k] = v
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go
new file mode 100644
index 0000000000..2dbb3c386d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/equinixmetalplatformstatus.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// EquinixMetalPlatformStatusApplyConfiguration represents an declarative configuration of the EquinixMetalPlatformStatus type for use
+// with apply.
+type EquinixMetalPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+}
+
+// EquinixMetalPlatformStatusApplyConfiguration constructs an declarative configuration of the EquinixMetalPlatformStatus type for use with
+// apply.
+func EquinixMetalPlatformStatus() *EquinixMetalPlatformStatusApplyConfiguration {
+ return &EquinixMetalPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *EquinixMetalPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *EquinixMetalPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *EquinixMetalPlatformStatusApplyConfiguration) WithIngressIP(value string) *EquinixMetalPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go
new file mode 100644
index 0000000000..d9eb037ad0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalipconfig.go
@@ -0,0 +1,34 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ExternalIPConfigApplyConfiguration represents an declarative configuration of the ExternalIPConfig type for use
+// with apply.
+type ExternalIPConfigApplyConfiguration struct {
+ Policy *ExternalIPPolicyApplyConfiguration `json:"policy,omitempty"`
+ AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPConfigApplyConfiguration constructs an declarative configuration of the ExternalIPConfig type for use with
+// apply.
+func ExternalIPConfig() *ExternalIPConfigApplyConfiguration {
+ return &ExternalIPConfigApplyConfiguration{}
+}
+
+// WithPolicy sets the Policy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Policy field is set to the value of the last call.
+func (b *ExternalIPConfigApplyConfiguration) WithPolicy(value *ExternalIPPolicyApplyConfiguration) *ExternalIPConfigApplyConfiguration {
+ b.Policy = value
+ return b
+}
+
+// WithAutoAssignCIDRs adds the given value to the AutoAssignCIDRs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AutoAssignCIDRs field.
+func (b *ExternalIPConfigApplyConfiguration) WithAutoAssignCIDRs(values ...string) *ExternalIPConfigApplyConfiguration {
+ for i := range values {
+ b.AutoAssignCIDRs = append(b.AutoAssignCIDRs, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go
new file mode 100644
index 0000000000..c368ffac83
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalippolicy.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ExternalIPPolicyApplyConfiguration represents an declarative configuration of the ExternalIPPolicy type for use
+// with apply.
+type ExternalIPPolicyApplyConfiguration struct {
+ AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+ RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// ExternalIPPolicyApplyConfiguration constructs an declarative configuration of the ExternalIPPolicy type for use with
+// apply.
+func ExternalIPPolicy() *ExternalIPPolicyApplyConfiguration {
+ return &ExternalIPPolicyApplyConfiguration{}
+}
+
+// WithAllowedCIDRs adds the given value to the AllowedCIDRs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedCIDRs field.
+func (b *ExternalIPPolicyApplyConfiguration) WithAllowedCIDRs(values ...string) *ExternalIPPolicyApplyConfiguration {
+ for i := range values {
+ b.AllowedCIDRs = append(b.AllowedCIDRs, values[i])
+ }
+ return b
+}
+
+// WithRejectedCIDRs adds the given value to the RejectedCIDRs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RejectedCIDRs field.
+func (b *ExternalIPPolicyApplyConfiguration) WithRejectedCIDRs(values ...string) *ExternalIPPolicyApplyConfiguration {
+ for i := range values {
+ b.RejectedCIDRs = append(b.RejectedCIDRs, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go
new file mode 100644
index 0000000000..e9d5ccae54
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ExternalPlatformSpecApplyConfiguration represents an declarative configuration of the ExternalPlatformSpec type for use
+// with apply.
+type ExternalPlatformSpecApplyConfiguration struct {
+ PlatformName *string `json:"platformName,omitempty"`
+}
+
+// ExternalPlatformSpecApplyConfiguration constructs an declarative configuration of the ExternalPlatformSpec type for use with
+// apply.
+func ExternalPlatformSpec() *ExternalPlatformSpecApplyConfiguration {
+ return &ExternalPlatformSpecApplyConfiguration{}
+}
+
+// WithPlatformName sets the PlatformName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PlatformName field is set to the value of the last call.
+func (b *ExternalPlatformSpecApplyConfiguration) WithPlatformName(value string) *ExternalPlatformSpecApplyConfiguration {
+ b.PlatformName = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go
new file mode 100644
index 0000000000..12e246227a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformstatus.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ExternalPlatformStatusApplyConfiguration represents an declarative configuration of the ExternalPlatformStatus type for use
+// with apply.
+type ExternalPlatformStatusApplyConfiguration struct {
+ CloudControllerManager *CloudControllerManagerStatusApplyConfiguration `json:"cloudControllerManager,omitempty"`
+}
+
+// ExternalPlatformStatusApplyConfiguration constructs an declarative configuration of the ExternalPlatformStatus type for use with
+// apply.
+func ExternalPlatformStatus() *ExternalPlatformStatusApplyConfiguration {
+ return &ExternalPlatformStatusApplyConfiguration{}
+}
+
+// WithCloudControllerManager sets the CloudControllerManager field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudControllerManager field is set to the value of the last call.
+func (b *ExternalPlatformStatusApplyConfiguration) WithCloudControllerManager(value *CloudControllerManagerStatusApplyConfiguration) *ExternalPlatformStatusApplyConfiguration {
+ b.CloudControllerManager = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go
new file mode 100644
index 0000000000..4ba3ab9c51
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// FeatureGateApplyConfiguration represents an declarative configuration of the FeatureGate type for use
+// with apply.
+type FeatureGateApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// FeatureGate constructs an declarative configuration of the FeatureGate type for use with
+// apply.
+func FeatureGate(name string) *FeatureGateApplyConfiguration {
+ b := &FeatureGateApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("FeatureGate")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractFeatureGate extracts the applied configuration owned by fieldManager from
+// featureGate. If no managedFields are found in featureGate for fieldManager, a
+// FeatureGateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// featureGate must be a unmodified FeatureGate API object that was retrieved from the Kubernetes API.
+// ExtractFeatureGate provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractFeatureGate(featureGate *apiconfigv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) {
+ return extractFeatureGate(featureGate, fieldManager, "")
+}
+
+// ExtractFeatureGateStatus is the same as ExtractFeatureGate except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractFeatureGateStatus(featureGate *apiconfigv1.FeatureGate, fieldManager string) (*FeatureGateApplyConfiguration, error) {
+ return extractFeatureGate(featureGate, fieldManager, "status")
+}
+
+func extractFeatureGate(featureGate *apiconfigv1.FeatureGate, fieldManager string, subresource string) (*FeatureGateApplyConfiguration, error) {
+ b := &FeatureGateApplyConfiguration{}
+ err := managedfields.ExtractInto(featureGate, internal.Parser().Type("com.github.openshift.api.config.v1.FeatureGate"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(featureGate.Name)
+
+ b.WithKind("FeatureGate")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithKind(value string) *FeatureGateApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithAPIVersion(value string) *FeatureGateApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithName(value string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithGenerateName(value string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithNamespace(value string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithUID(value types.UID) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithResourceVersion(value string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithGeneration(value int64) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *FeatureGateApplyConfiguration) WithLabels(entries map[string]string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *FeatureGateApplyConfiguration) WithAnnotations(entries map[string]string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *FeatureGateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *FeatureGateApplyConfiguration) WithFinalizers(values ...string) *FeatureGateApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *FeatureGateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithSpec(value *FeatureGateSpecApplyConfiguration) *FeatureGateApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApplyConfiguration) *FeatureGateApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go
new file mode 100644
index 0000000000..817cf44f61
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// FeatureGateAttributesApplyConfiguration represents an declarative configuration of the FeatureGateAttributes type for use
+// with apply.
+type FeatureGateAttributesApplyConfiguration struct {
+ Name *v1.FeatureGateName `json:"name,omitempty"`
+}
+
+// FeatureGateAttributesApplyConfiguration constructs an declarative configuration of the FeatureGateAttributes type for use with
+// apply.
+func FeatureGateAttributes() *FeatureGateAttributesApplyConfiguration {
+ return &FeatureGateAttributesApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *FeatureGateAttributesApplyConfiguration) WithName(value v1.FeatureGateName) *FeatureGateAttributesApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go
new file mode 100644
index 0000000000..61bd51ca2d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// FeatureGateDetailsApplyConfiguration represents an declarative configuration of the FeatureGateDetails type for use
+// with apply.
+type FeatureGateDetailsApplyConfiguration struct {
+ Version *string `json:"version,omitempty"`
+ Enabled []FeatureGateAttributesApplyConfiguration `json:"enabled,omitempty"`
+ Disabled []FeatureGateAttributesApplyConfiguration `json:"disabled,omitempty"`
+}
+
+// FeatureGateDetailsApplyConfiguration constructs an declarative configuration of the FeatureGateDetails type for use with
+// apply.
+func FeatureGateDetails() *FeatureGateDetailsApplyConfiguration {
+ return &FeatureGateDetailsApplyConfiguration{}
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *FeatureGateDetailsApplyConfiguration) WithVersion(value string) *FeatureGateDetailsApplyConfiguration {
+ b.Version = &value
+ return b
+}
+
+// WithEnabled adds the given value to the Enabled field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Enabled field.
+func (b *FeatureGateDetailsApplyConfiguration) WithEnabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithEnabled")
+ }
+ b.Enabled = append(b.Enabled, *values[i])
+ }
+ return b
+}
+
+// WithDisabled adds the given value to the Disabled field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Disabled field.
+func (b *FeatureGateDetailsApplyConfiguration) WithDisabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithDisabled")
+ }
+ b.Disabled = append(b.Disabled, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go
new file mode 100644
index 0000000000..f22ead2c0e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateselection.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// FeatureGateSelectionApplyConfiguration represents an declarative configuration of the FeatureGateSelection type for use
+// with apply.
+type FeatureGateSelectionApplyConfiguration struct {
+ FeatureSet *v1.FeatureSet `json:"featureSet,omitempty"`
+ CustomNoUpgrade *CustomFeatureGatesApplyConfiguration `json:"customNoUpgrade,omitempty"`
+}
+
+// FeatureGateSelectionApplyConfiguration constructs an declarative configuration of the FeatureGateSelection type for use with
+// apply.
+func FeatureGateSelection() *FeatureGateSelectionApplyConfiguration {
+ return &FeatureGateSelectionApplyConfiguration{}
+}
+
+// WithFeatureSet sets the FeatureSet field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FeatureSet field is set to the value of the last call.
+func (b *FeatureGateSelectionApplyConfiguration) WithFeatureSet(value v1.FeatureSet) *FeatureGateSelectionApplyConfiguration {
+ b.FeatureSet = &value
+ return b
+}
+
+// WithCustomNoUpgrade sets the CustomNoUpgrade field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CustomNoUpgrade field is set to the value of the last call.
+func (b *FeatureGateSelectionApplyConfiguration) WithCustomNoUpgrade(value *CustomFeatureGatesApplyConfiguration) *FeatureGateSelectionApplyConfiguration {
+ b.CustomNoUpgrade = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go
new file mode 100644
index 0000000000..71a7464198
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatespec.go
@@ -0,0 +1,35 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// FeatureGateSpecApplyConfiguration represents an declarative configuration of the FeatureGateSpec type for use
+// with apply.
+type FeatureGateSpecApplyConfiguration struct {
+ FeatureGateSelectionApplyConfiguration `json:",inline"`
+}
+
+// FeatureGateSpecApplyConfiguration constructs an declarative configuration of the FeatureGateSpec type for use with
+// apply.
+func FeatureGateSpec() *FeatureGateSpecApplyConfiguration {
+ return &FeatureGateSpecApplyConfiguration{}
+}
+
+// WithFeatureSet sets the FeatureSet field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FeatureSet field is set to the value of the last call.
+func (b *FeatureGateSpecApplyConfiguration) WithFeatureSet(value configv1.FeatureSet) *FeatureGateSpecApplyConfiguration {
+ b.FeatureSet = &value
+ return b
+}
+
+// WithCustomNoUpgrade sets the CustomNoUpgrade field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CustomNoUpgrade field is set to the value of the last call.
+func (b *FeatureGateSpecApplyConfiguration) WithCustomNoUpgrade(value *CustomFeatureGatesApplyConfiguration) *FeatureGateSpecApplyConfiguration {
+ b.CustomNoUpgrade = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go
new file mode 100644
index 0000000000..fd09f59dad
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// FeatureGateStatusApplyConfiguration represents an declarative configuration of the FeatureGateStatus type for use
+// with apply.
+type FeatureGateStatusApplyConfiguration struct {
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ FeatureGates []FeatureGateDetailsApplyConfiguration `json:"featureGates,omitempty"`
+}
+
+// FeatureGateStatusApplyConfiguration constructs an declarative configuration of the FeatureGateStatus type for use with
+// apply.
+func FeatureGateStatus() *FeatureGateStatusApplyConfiguration {
+ return &FeatureGateStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *FeatureGateStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
+
+// WithFeatureGates adds the given value to the FeatureGates field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FeatureGates field.
+func (b *FeatureGateStatusApplyConfiguration) WithFeatureGates(values ...*FeatureGateDetailsApplyConfiguration) *FeatureGateStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithFeatureGates")
+ }
+ b.FeatureGates = append(b.FeatureGates, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go
new file mode 100644
index 0000000000..9e35e3c60b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpplatformstatus.go
@@ -0,0 +1,69 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GCPPlatformStatusApplyConfiguration represents an declarative configuration of the GCPPlatformStatus type for use
+// with apply.
+type GCPPlatformStatusApplyConfiguration struct {
+ ProjectID *string `json:"projectID,omitempty"`
+ Region *string `json:"region,omitempty"`
+ ResourceLabels []GCPResourceLabelApplyConfiguration `json:"resourceLabels,omitempty"`
+ ResourceTags []GCPResourceTagApplyConfiguration `json:"resourceTags,omitempty"`
+ CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"`
+}
+
+// GCPPlatformStatusApplyConfiguration constructs an declarative configuration of the GCPPlatformStatus type for use with
+// apply.
+func GCPPlatformStatus() *GCPPlatformStatusApplyConfiguration {
+ return &GCPPlatformStatusApplyConfiguration{}
+}
+
+// WithProjectID sets the ProjectID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProjectID field is set to the value of the last call.
+func (b *GCPPlatformStatusApplyConfiguration) WithProjectID(value string) *GCPPlatformStatusApplyConfiguration {
+ b.ProjectID = &value
+ return b
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *GCPPlatformStatusApplyConfiguration) WithRegion(value string) *GCPPlatformStatusApplyConfiguration {
+ b.Region = &value
+ return b
+}
+
+// WithResourceLabels adds the given value to the ResourceLabels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceLabels field.
+func (b *GCPPlatformStatusApplyConfiguration) WithResourceLabels(values ...*GCPResourceLabelApplyConfiguration) *GCPPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceLabels")
+ }
+ b.ResourceLabels = append(b.ResourceLabels, *values[i])
+ }
+ return b
+}
+
+// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceTags field.
+func (b *GCPPlatformStatusApplyConfiguration) WithResourceTags(values ...*GCPResourceTagApplyConfiguration) *GCPPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceTags")
+ }
+ b.ResourceTags = append(b.ResourceTags, *values[i])
+ }
+ return b
+}
+
+// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call.
+func (b *GCPPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *GCPPlatformStatusApplyConfiguration {
+ b.CloudLoadBalancerConfig = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go
new file mode 100644
index 0000000000..685b14fe13
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcelabel.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GCPResourceLabelApplyConfiguration represents an declarative configuration of the GCPResourceLabel type for use
+// with apply.
+type GCPResourceLabelApplyConfiguration struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// GCPResourceLabelApplyConfiguration constructs an declarative configuration of the GCPResourceLabel type for use with
+// apply.
+func GCPResourceLabel() *GCPResourceLabelApplyConfiguration {
+ return &GCPResourceLabelApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *GCPResourceLabelApplyConfiguration) WithKey(value string) *GCPResourceLabelApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *GCPResourceLabelApplyConfiguration) WithValue(value string) *GCPResourceLabelApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go
new file mode 100644
index 0000000000..9611b28534
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gcpresourcetag.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GCPResourceTagApplyConfiguration represents an declarative configuration of the GCPResourceTag type for use
+// with apply.
+type GCPResourceTagApplyConfiguration struct {
+ ParentID *string `json:"parentID,omitempty"`
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// GCPResourceTagApplyConfiguration constructs an declarative configuration of the GCPResourceTag type for use with
+// apply.
+func GCPResourceTag() *GCPResourceTagApplyConfiguration {
+ return &GCPResourceTagApplyConfiguration{}
+}
+
+// WithParentID sets the ParentID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParentID field is set to the value of the last call.
+func (b *GCPResourceTagApplyConfiguration) WithParentID(value string) *GCPResourceTagApplyConfiguration {
+ b.ParentID = &value
+ return b
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *GCPResourceTagApplyConfiguration) WithKey(value string) *GCPResourceTagApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *GCPResourceTagApplyConfiguration) WithValue(value string) *GCPResourceTagApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go
new file mode 100644
index 0000000000..bdaa2c7ac0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/githubidentityprovider.go
@@ -0,0 +1,72 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GitHubIdentityProviderApplyConfiguration represents an declarative configuration of the GitHubIdentityProvider type for use
+// with apply.
+type GitHubIdentityProviderApplyConfiguration struct {
+ ClientID *string `json:"clientID,omitempty"`
+ ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"`
+ Organizations []string `json:"organizations,omitempty"`
+ Teams []string `json:"teams,omitempty"`
+ Hostname *string `json:"hostname,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+}
+
+// GitHubIdentityProviderApplyConfiguration constructs an declarative configuration of the GitHubIdentityProvider type for use with
+// apply.
+func GitHubIdentityProvider() *GitHubIdentityProviderApplyConfiguration {
+ return &GitHubIdentityProviderApplyConfiguration{}
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithClientID(value string) *GitHubIdentityProviderApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GitHubIdentityProviderApplyConfiguration {
+ b.ClientSecret = value
+ return b
+}
+
+// WithOrganizations adds the given value to the Organizations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Organizations field.
+func (b *GitHubIdentityProviderApplyConfiguration) WithOrganizations(values ...string) *GitHubIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.Organizations = append(b.Organizations, values[i])
+ }
+ return b
+}
+
+// WithTeams adds the given value to the Teams field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Teams field.
+func (b *GitHubIdentityProviderApplyConfiguration) WithTeams(values ...string) *GitHubIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.Teams = append(b.Teams, values[i])
+ }
+ return b
+}
+
+// WithHostname sets the Hostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Hostname field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithHostname(value string) *GitHubIdentityProviderApplyConfiguration {
+ b.Hostname = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *GitHubIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *GitHubIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go
new file mode 100644
index 0000000000..ece6b0eefd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gitlabidentityprovider.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GitLabIdentityProviderApplyConfiguration represents an declarative configuration of the GitLabIdentityProvider type for use
+// with apply.
+type GitLabIdentityProviderApplyConfiguration struct {
+ ClientID *string `json:"clientID,omitempty"`
+ ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"`
+ URL *string `json:"url,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+}
+
+// GitLabIdentityProviderApplyConfiguration constructs an declarative configuration of the GitLabIdentityProvider type for use with
+// apply.
+func GitLabIdentityProvider() *GitLabIdentityProviderApplyConfiguration {
+ return &GitLabIdentityProviderApplyConfiguration{}
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *GitLabIdentityProviderApplyConfiguration) WithClientID(value string) *GitLabIdentityProviderApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *GitLabIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GitLabIdentityProviderApplyConfiguration {
+ b.ClientSecret = value
+ return b
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *GitLabIdentityProviderApplyConfiguration) WithURL(value string) *GitLabIdentityProviderApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *GitLabIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *GitLabIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go
new file mode 100644
index 0000000000..1d38e58452
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/googleidentityprovider.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// GoogleIdentityProviderApplyConfiguration represents an declarative configuration of the GoogleIdentityProvider type for use
+// with apply.
+type GoogleIdentityProviderApplyConfiguration struct {
+ ClientID *string `json:"clientID,omitempty"`
+ ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"`
+ HostedDomain *string `json:"hostedDomain,omitempty"`
+}
+
+// GoogleIdentityProviderApplyConfiguration constructs an declarative configuration of the GoogleIdentityProvider type for use with
+// apply.
+func GoogleIdentityProvider() *GoogleIdentityProviderApplyConfiguration {
+ return &GoogleIdentityProviderApplyConfiguration{}
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *GoogleIdentityProviderApplyConfiguration) WithClientID(value string) *GoogleIdentityProviderApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *GoogleIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *GoogleIdentityProviderApplyConfiguration {
+ b.ClientSecret = value
+ return b
+}
+
+// WithHostedDomain sets the HostedDomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostedDomain field is set to the value of the last call.
+func (b *GoogleIdentityProviderApplyConfiguration) WithHostedDomain(value string) *GoogleIdentityProviderApplyConfiguration {
+ b.HostedDomain = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go
new file mode 100644
index 0000000000..719b874354
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/htpasswdidentityprovider.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// HTPasswdIdentityProviderApplyConfiguration represents an declarative configuration of the HTPasswdIdentityProvider type for use
+// with apply.
+type HTPasswdIdentityProviderApplyConfiguration struct {
+ FileData *SecretNameReferenceApplyConfiguration `json:"fileData,omitempty"`
+}
+
+// HTPasswdIdentityProviderApplyConfiguration constructs an declarative configuration of the HTPasswdIdentityProvider type for use with
+// apply.
+func HTPasswdIdentityProvider() *HTPasswdIdentityProviderApplyConfiguration {
+ return &HTPasswdIdentityProviderApplyConfiguration{}
+}
+
+// WithFileData sets the FileData field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FileData field is set to the value of the last call.
+func (b *HTPasswdIdentityProviderApplyConfiguration) WithFileData(value *SecretNameReferenceApplyConfiguration) *HTPasswdIdentityProviderApplyConfiguration {
+ b.FileData = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go
new file mode 100644
index 0000000000..6b0683b9f4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsource.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// HubSourceApplyConfiguration represents an declarative configuration of the HubSource type for use
+// with apply.
+type HubSourceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Disabled *bool `json:"disabled,omitempty"`
+}
+
+// HubSourceApplyConfiguration constructs an declarative configuration of the HubSource type for use with
+// apply.
+func HubSource() *HubSourceApplyConfiguration {
+ return &HubSourceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *HubSourceApplyConfiguration) WithName(value string) *HubSourceApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithDisabled sets the Disabled field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Disabled field is set to the value of the last call.
+func (b *HubSourceApplyConfiguration) WithDisabled(value bool) *HubSourceApplyConfiguration {
+ b.Disabled = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go
new file mode 100644
index 0000000000..6c466f96e9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/hubsourcestatus.go
@@ -0,0 +1,57 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// HubSourceStatusApplyConfiguration represents an declarative configuration of the HubSourceStatus type for use
+// with apply.
+type HubSourceStatusApplyConfiguration struct {
+ *HubSourceApplyConfiguration `json:"HubSource,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Message *string `json:"message,omitempty"`
+}
+
+// HubSourceStatusApplyConfiguration constructs an declarative configuration of the HubSourceStatus type for use with
+// apply.
+func HubSourceStatus() *HubSourceStatusApplyConfiguration {
+ return &HubSourceStatusApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *HubSourceStatusApplyConfiguration) WithName(value string) *HubSourceStatusApplyConfiguration {
+ b.ensureHubSourceApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithDisabled sets the Disabled field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Disabled field is set to the value of the last call.
+func (b *HubSourceStatusApplyConfiguration) WithDisabled(value bool) *HubSourceStatusApplyConfiguration {
+ b.ensureHubSourceApplyConfigurationExists()
+ b.Disabled = &value
+ return b
+}
+
+func (b *HubSourceStatusApplyConfiguration) ensureHubSourceApplyConfigurationExists() {
+ if b.HubSourceApplyConfiguration == nil {
+ b.HubSourceApplyConfiguration = &HubSourceApplyConfiguration{}
+ }
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *HubSourceStatusApplyConfiguration) WithStatus(value string) *HubSourceStatusApplyConfiguration {
+ b.Status = &value
+ return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *HubSourceStatusApplyConfiguration) WithMessage(value string) *HubSourceStatusApplyConfiguration {
+ b.Message = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go
new file mode 100644
index 0000000000..9d1933377a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudplatformstatus.go
@@ -0,0 +1,77 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// IBMCloudPlatformStatusApplyConfiguration represents an declarative configuration of the IBMCloudPlatformStatus type for use
+// with apply.
+type IBMCloudPlatformStatusApplyConfiguration struct {
+ Location *string `json:"location,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ ProviderType *v1.IBMCloudProviderType `json:"providerType,omitempty"`
+ CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"`
+ DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"`
+ ServiceEndpoints []IBMCloudServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+}
+
+// IBMCloudPlatformStatusApplyConfiguration constructs an declarative configuration of the IBMCloudPlatformStatus type for use with
+// apply.
+func IBMCloudPlatformStatus() *IBMCloudPlatformStatusApplyConfiguration {
+ return &IBMCloudPlatformStatusApplyConfiguration{}
+}
+
+// WithLocation sets the Location field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Location field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithLocation(value string) *IBMCloudPlatformStatusApplyConfiguration {
+ b.Location = &value
+ return b
+}
+
+// WithResourceGroupName sets the ResourceGroupName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroupName field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithResourceGroupName(value string) *IBMCloudPlatformStatusApplyConfiguration {
+ b.ResourceGroupName = &value
+ return b
+}
+
+// WithProviderType sets the ProviderType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProviderType field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithProviderType(value v1.IBMCloudProviderType) *IBMCloudPlatformStatusApplyConfiguration {
+ b.ProviderType = &value
+ return b
+}
+
+// WithCISInstanceCRN sets the CISInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CISInstanceCRN field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithCISInstanceCRN(value string) *IBMCloudPlatformStatusApplyConfiguration {
+ b.CISInstanceCRN = &value
+ return b
+}
+
+// WithDNSInstanceCRN sets the DNSInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSInstanceCRN field is set to the value of the last call.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithDNSInstanceCRN(value string) *IBMCloudPlatformStatusApplyConfiguration {
+ b.DNSInstanceCRN = &value
+ return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *IBMCloudPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*IBMCloudServiceEndpointApplyConfiguration) *IBMCloudPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithServiceEndpoints")
+ }
+ b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go
new file mode 100644
index 0000000000..7fc9f86325
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ibmcloudserviceendpoint.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// IBMCloudServiceEndpointApplyConfiguration represents an declarative configuration of the IBMCloudServiceEndpoint type for use
+// with apply.
+type IBMCloudServiceEndpointApplyConfiguration struct {
+ Name *v1.IBMCloudServiceName `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// IBMCloudServiceEndpointApplyConfiguration constructs an declarative configuration of the IBMCloudServiceEndpoint type for use with
+// apply.
+func IBMCloudServiceEndpoint() *IBMCloudServiceEndpointApplyConfiguration {
+ return &IBMCloudServiceEndpointApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *IBMCloudServiceEndpointApplyConfiguration) WithName(value v1.IBMCloudServiceName) *IBMCloudServiceEndpointApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *IBMCloudServiceEndpointApplyConfiguration) WithURL(value string) *IBMCloudServiceEndpointApplyConfiguration {
+ b.URL = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go
new file mode 100644
index 0000000000..869d822bb4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityprovider.go
@@ -0,0 +1,117 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// IdentityProviderApplyConfiguration represents an declarative configuration of the IdentityProvider type for use
+// with apply.
+type IdentityProviderApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ MappingMethod *v1.MappingMethodType `json:"mappingMethod,omitempty"`
+ IdentityProviderConfigApplyConfiguration `json:",inline"`
+}
+
+// IdentityProviderApplyConfiguration constructs an declarative configuration of the IdentityProvider type for use with
+// apply.
+func IdentityProvider() *IdentityProviderApplyConfiguration {
+ return &IdentityProviderApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithName(value string) *IdentityProviderApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithMappingMethod sets the MappingMethod field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MappingMethod field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithMappingMethod(value v1.MappingMethodType) *IdentityProviderApplyConfiguration {
+ b.MappingMethod = &value
+ return b
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithType(value v1.IdentityProviderType) *IdentityProviderApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BasicAuth field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithBasicAuth(value *BasicAuthIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.BasicAuth = value
+ return b
+}
+
+// WithGitHub sets the GitHub field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitHub field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithGitHub(value *GitHubIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.GitHub = value
+ return b
+}
+
+// WithGitLab sets the GitLab field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitLab field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithGitLab(value *GitLabIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.GitLab = value
+ return b
+}
+
+// WithGoogle sets the Google field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Google field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithGoogle(value *GoogleIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.Google = value
+ return b
+}
+
+// WithHTPasswd sets the HTPasswd field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTPasswd field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithHTPasswd(value *HTPasswdIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.HTPasswd = value
+ return b
+}
+
+// WithKeystone sets the Keystone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Keystone field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithKeystone(value *KeystoneIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.Keystone = value
+ return b
+}
+
+// WithLDAP sets the LDAP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LDAP field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithLDAP(value *LDAPIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.LDAP = value
+ return b
+}
+
+// WithOpenID sets the OpenID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OpenID field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithOpenID(value *OpenIDIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.OpenID = value
+ return b
+}
+
+// WithRequestHeader sets the RequestHeader field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestHeader field is set to the value of the last call.
+func (b *IdentityProviderApplyConfiguration) WithRequestHeader(value *RequestHeaderIdentityProviderApplyConfiguration) *IdentityProviderApplyConfiguration {
+ b.RequestHeader = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go
new file mode 100644
index 0000000000..e87c122879
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/identityproviderconfig.go
@@ -0,0 +1,108 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// IdentityProviderConfigApplyConfiguration represents an declarative configuration of the IdentityProviderConfig type for use
+// with apply.
+type IdentityProviderConfigApplyConfiguration struct {
+ Type *v1.IdentityProviderType `json:"type,omitempty"`
+ BasicAuth *BasicAuthIdentityProviderApplyConfiguration `json:"basicAuth,omitempty"`
+ GitHub *GitHubIdentityProviderApplyConfiguration `json:"github,omitempty"`
+ GitLab *GitLabIdentityProviderApplyConfiguration `json:"gitlab,omitempty"`
+ Google *GoogleIdentityProviderApplyConfiguration `json:"google,omitempty"`
+ HTPasswd *HTPasswdIdentityProviderApplyConfiguration `json:"htpasswd,omitempty"`
+ Keystone *KeystoneIdentityProviderApplyConfiguration `json:"keystone,omitempty"`
+ LDAP *LDAPIdentityProviderApplyConfiguration `json:"ldap,omitempty"`
+ OpenID *OpenIDIdentityProviderApplyConfiguration `json:"openID,omitempty"`
+ RequestHeader *RequestHeaderIdentityProviderApplyConfiguration `json:"requestHeader,omitempty"`
+}
+
+// IdentityProviderConfigApplyConfiguration constructs an declarative configuration of the IdentityProviderConfig type for use with
+// apply.
+func IdentityProviderConfig() *IdentityProviderConfigApplyConfiguration {
+ return &IdentityProviderConfigApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithType(value v1.IdentityProviderType) *IdentityProviderConfigApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithBasicAuth sets the BasicAuth field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BasicAuth field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithBasicAuth(value *BasicAuthIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.BasicAuth = value
+ return b
+}
+
+// WithGitHub sets the GitHub field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitHub field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithGitHub(value *GitHubIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.GitHub = value
+ return b
+}
+
+// WithGitLab sets the GitLab field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitLab field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithGitLab(value *GitLabIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.GitLab = value
+ return b
+}
+
+// WithGoogle sets the Google field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Google field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithGoogle(value *GoogleIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.Google = value
+ return b
+}
+
+// WithHTPasswd sets the HTPasswd field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTPasswd field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithHTPasswd(value *HTPasswdIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.HTPasswd = value
+ return b
+}
+
+// WithKeystone sets the Keystone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Keystone field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithKeystone(value *KeystoneIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.Keystone = value
+ return b
+}
+
+// WithLDAP sets the LDAP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LDAP field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithLDAP(value *LDAPIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.LDAP = value
+ return b
+}
+
+// WithOpenID sets the OpenID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OpenID field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithOpenID(value *OpenIDIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.OpenID = value
+ return b
+}
+
+// WithRequestHeader sets the RequestHeader field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequestHeader field is set to the value of the last call.
+func (b *IdentityProviderConfigApplyConfiguration) WithRequestHeader(value *RequestHeaderIdentityProviderApplyConfiguration) *IdentityProviderConfigApplyConfiguration {
+ b.RequestHeader = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go
new file mode 100644
index 0000000000..f93741283b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageApplyConfiguration represents an declarative configuration of the Image type for use
+// with apply.
+type ImageApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ImageSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ImageStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Image constructs an declarative configuration of the Image type for use with
+// apply.
+func Image(name string) *ImageApplyConfiguration {
+ b := &ImageApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Image")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractImage extracts the applied configuration owned by fieldManager from
+// image. If no managedFields are found in image for fieldManager, a
+// ImageApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// image must be a unmodified Image API object that was retrieved from the Kubernetes API.
+// ExtractImage provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImage(image *apiconfigv1.Image, fieldManager string) (*ImageApplyConfiguration, error) {
+ return extractImage(image, fieldManager, "")
+}
+
+// ExtractImageStatus is the same as ExtractImage except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageStatus(image *apiconfigv1.Image, fieldManager string) (*ImageApplyConfiguration, error) {
+ return extractImage(image, fieldManager, "status")
+}
+
+func extractImage(image *apiconfigv1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) {
+ b := &ImageApplyConfiguration{}
+ err := managedfields.ExtractInto(image, internal.Parser().Type("com.github.openshift.api.config.v1.Image"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(image.Name)
+
+ b.WithKind("Image")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithGeneration(value int64) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageApplyConfiguration) WithFinalizers(values ...string) *ImageApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithSpec(value *ImageSpecApplyConfiguration) *ImageApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageApplyConfiguration) WithStatus(value *ImageStatusApplyConfiguration) *ImageApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go
new file mode 100644
index 0000000000..bded262d73
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go
@@ -0,0 +1,231 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageContentPolicyApplyConfiguration represents an declarative configuration of the ImageContentPolicy type for use
+// with apply.
+type ImageContentPolicyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ImageContentPolicySpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ImageContentPolicy constructs an declarative configuration of the ImageContentPolicy type for use with
+// apply.
+func ImageContentPolicy(name string) *ImageContentPolicyApplyConfiguration {
+ b := &ImageContentPolicyApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ImageContentPolicy")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractImageContentPolicy extracts the applied configuration owned by fieldManager from
+// imageContentPolicy. If no managedFields are found in imageContentPolicy for fieldManager, a
+// ImageContentPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// imageContentPolicy must be a unmodified ImageContentPolicy API object that was retrieved from the Kubernetes API.
+// ExtractImageContentPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageContentPolicy(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) {
+ return extractImageContentPolicy(imageContentPolicy, fieldManager, "")
+}
+
+// ExtractImageContentPolicyStatus is the same as ExtractImageContentPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageContentPolicyStatus(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string) (*ImageContentPolicyApplyConfiguration, error) {
+ return extractImageContentPolicy(imageContentPolicy, fieldManager, "status")
+}
+
+func extractImageContentPolicy(imageContentPolicy *apiconfigv1.ImageContentPolicy, fieldManager string, subresource string) (*ImageContentPolicyApplyConfiguration, error) {
+ b := &ImageContentPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(imageContentPolicy, internal.Parser().Type("com.github.openshift.api.config.v1.ImageContentPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(imageContentPolicy.Name)
+
+ b.WithKind("ImageContentPolicy")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithKind(value string) *ImageContentPolicyApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithAPIVersion(value string) *ImageContentPolicyApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithName(value string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithGenerateName(value string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithNamespace(value string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithUID(value types.UID) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithResourceVersion(value string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithGeneration(value int64) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ImageContentPolicyApplyConfiguration) WithLabels(entries map[string]string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ImageContentPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageContentPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageContentPolicyApplyConfiguration) WithFinalizers(values ...string) *ImageContentPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ImageContentPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageContentPolicyApplyConfiguration) WithSpec(value *ImageContentPolicySpecApplyConfiguration) *ImageContentPolicyApplyConfiguration {
+ b.Spec = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go
new file mode 100644
index 0000000000..5f063096f0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicyspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageContentPolicySpecApplyConfiguration represents an declarative configuration of the ImageContentPolicySpec type for use
+// with apply.
+type ImageContentPolicySpecApplyConfiguration struct {
+ RepositoryDigestMirrors []RepositoryDigestMirrorsApplyConfiguration `json:"repositoryDigestMirrors,omitempty"`
+}
+
+// ImageContentPolicySpecApplyConfiguration constructs an declarative configuration of the ImageContentPolicySpec type for use with
+// apply.
+func ImageContentPolicySpec() *ImageContentPolicySpecApplyConfiguration {
+ return &ImageContentPolicySpecApplyConfiguration{}
+}
+
+// WithRepositoryDigestMirrors adds the given value to the RepositoryDigestMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RepositoryDigestMirrors field.
+func (b *ImageContentPolicySpecApplyConfiguration) WithRepositoryDigestMirrors(values ...*RepositoryDigestMirrorsApplyConfiguration) *ImageContentPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRepositoryDigestMirrors")
+ }
+ b.RepositoryDigestMirrors = append(b.RepositoryDigestMirrors, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go
new file mode 100644
index 0000000000..47aa3bb82e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrors.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ImageDigestMirrorsApplyConfiguration represents an declarative configuration of the ImageDigestMirrors type for use
+// with apply.
+type ImageDigestMirrorsApplyConfiguration struct {
+ Source *string `json:"source,omitempty"`
+ Mirrors []v1.ImageMirror `json:"mirrors,omitempty"`
+ MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
+
+// ImageDigestMirrorsApplyConfiguration constructs an declarative configuration of the ImageDigestMirrors type for use with
+// apply.
+func ImageDigestMirrors() *ImageDigestMirrorsApplyConfiguration {
+ return &ImageDigestMirrorsApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *ImageDigestMirrorsApplyConfiguration) WithSource(value string) *ImageDigestMirrorsApplyConfiguration {
+ b.Source = &value
+ return b
+}
+
+// WithMirrors adds the given value to the Mirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *ImageDigestMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMirror) *ImageDigestMirrorsApplyConfiguration {
+ for i := range values {
+ b.Mirrors = append(b.Mirrors, values[i])
+ }
+ return b
+}
+
+// WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MirrorSourcePolicy field is set to the value of the last call.
+func (b *ImageDigestMirrorsApplyConfiguration) WithMirrorSourcePolicy(value v1.MirrorSourcePolicy) *ImageDigestMirrorsApplyConfiguration {
+ b.MirrorSourcePolicy = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go
new file mode 100644
index 0000000000..b1e2aab4fa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageDigestMirrorSetApplyConfiguration represents an declarative configuration of the ImageDigestMirrorSet type for use
+// with apply.
+type ImageDigestMirrorSetApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ImageDigestMirrorSetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.ImageDigestMirrorSetStatus `json:"status,omitempty"`
+}
+
+// ImageDigestMirrorSet constructs an declarative configuration of the ImageDigestMirrorSet type for use with
+// apply.
+func ImageDigestMirrorSet(name string) *ImageDigestMirrorSetApplyConfiguration {
+ b := &ImageDigestMirrorSetApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ImageDigestMirrorSet")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractImageDigestMirrorSet extracts the applied configuration owned by fieldManager from
+// imageDigestMirrorSet. If no managedFields are found in imageDigestMirrorSet for fieldManager, a
+// ImageDigestMirrorSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// imageDigestMirrorSet must be a unmodified ImageDigestMirrorSet API object that was retrieved from the Kubernetes API.
+// ExtractImageDigestMirrorSet provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageDigestMirrorSet(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+ return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "")
+}
+
+// ExtractImageDigestMirrorSetStatus is the same as ExtractImageDigestMirrorSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageDigestMirrorSetStatus(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+ return extractImageDigestMirrorSet(imageDigestMirrorSet, fieldManager, "status")
+}
+
+func extractImageDigestMirrorSet(imageDigestMirrorSet *apiconfigv1.ImageDigestMirrorSet, fieldManager string, subresource string) (*ImageDigestMirrorSetApplyConfiguration, error) {
+ b := &ImageDigestMirrorSetApplyConfiguration{}
+ err := managedfields.ExtractInto(imageDigestMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageDigestMirrorSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(imageDigestMirrorSet.Name)
+
+ b.WithKind("ImageDigestMirrorSet")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithKind(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithName(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithNamespace(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageDigestMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ImageDigestMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithSpec(value *ImageDigestMirrorSetSpecApplyConfiguration) *ImageDigestMirrorSetApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.ImageDigestMirrorSetStatus) *ImageDigestMirrorSetApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go
new file mode 100644
index 0000000000..f34a0c0af7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorsetspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageDigestMirrorSetSpecApplyConfiguration represents an declarative configuration of the ImageDigestMirrorSetSpec type for use
+// with apply.
+type ImageDigestMirrorSetSpecApplyConfiguration struct {
+ ImageDigestMirrors []ImageDigestMirrorsApplyConfiguration `json:"imageDigestMirrors,omitempty"`
+}
+
+// ImageDigestMirrorSetSpecApplyConfiguration constructs an declarative configuration of the ImageDigestMirrorSetSpec type for use with
+// apply.
+func ImageDigestMirrorSetSpec() *ImageDigestMirrorSetSpecApplyConfiguration {
+ return &ImageDigestMirrorSetSpecApplyConfiguration{}
+}
+
+// WithImageDigestMirrors adds the given value to the ImageDigestMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageDigestMirrors field.
+func (b *ImageDigestMirrorSetSpecApplyConfiguration) WithImageDigestMirrors(values ...*ImageDigestMirrorsApplyConfiguration) *ImageDigestMirrorSetSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithImageDigestMirrors")
+ }
+ b.ImageDigestMirrors = append(b.ImageDigestMirrors, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go
new file mode 100644
index 0000000000..1199666c48
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagelabel.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageLabelApplyConfiguration represents an declarative configuration of the ImageLabel type for use
+// with apply.
+type ImageLabelApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+}
+
+// ImageLabelApplyConfiguration constructs an declarative configuration of the ImageLabel type for use with
+// apply.
+func ImageLabel() *ImageLabelApplyConfiguration {
+ return &ImageLabelApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithName(value string) *ImageLabelApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithValue sets the Value field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Value field is set to the value of the last call.
+func (b *ImageLabelApplyConfiguration) WithValue(value string) *ImageLabelApplyConfiguration {
+ b.Value = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go
new file mode 100644
index 0000000000..10e80e77fa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagespec.go
@@ -0,0 +1,57 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageSpecApplyConfiguration represents an declarative configuration of the ImageSpec type for use
+// with apply.
+type ImageSpecApplyConfiguration struct {
+ AllowedRegistriesForImport []RegistryLocationApplyConfiguration `json:"allowedRegistriesForImport,omitempty"`
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+ AdditionalTrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"additionalTrustedCA,omitempty"`
+ RegistrySources *RegistrySourcesApplyConfiguration `json:"registrySources,omitempty"`
+}
+
+// ImageSpecApplyConfiguration constructs an declarative configuration of the ImageSpec type for use with
+// apply.
+func ImageSpec() *ImageSpecApplyConfiguration {
+ return &ImageSpecApplyConfiguration{}
+}
+
+// WithAllowedRegistriesForImport adds the given value to the AllowedRegistriesForImport field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedRegistriesForImport field.
+func (b *ImageSpecApplyConfiguration) WithAllowedRegistriesForImport(values ...*RegistryLocationApplyConfiguration) *ImageSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithAllowedRegistriesForImport")
+ }
+ b.AllowedRegistriesForImport = append(b.AllowedRegistriesForImport, *values[i])
+ }
+ return b
+}
+
+// WithExternalRegistryHostnames adds the given value to the ExternalRegistryHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExternalRegistryHostnames field.
+func (b *ImageSpecApplyConfiguration) WithExternalRegistryHostnames(values ...string) *ImageSpecApplyConfiguration {
+ for i := range values {
+ b.ExternalRegistryHostnames = append(b.ExternalRegistryHostnames, values[i])
+ }
+ return b
+}
+
+// WithAdditionalTrustedCA sets the AdditionalTrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdditionalTrustedCA field is set to the value of the last call.
+func (b *ImageSpecApplyConfiguration) WithAdditionalTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *ImageSpecApplyConfiguration {
+ b.AdditionalTrustedCA = value
+ return b
+}
+
+// WithRegistrySources sets the RegistrySources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RegistrySources field is set to the value of the last call.
+func (b *ImageSpecApplyConfiguration) WithRegistrySources(value *RegistrySourcesApplyConfiguration) *ImageSpecApplyConfiguration {
+ b.RegistrySources = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go
new file mode 100644
index 0000000000..38c90271a1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagestatus.go
@@ -0,0 +1,34 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageStatusApplyConfiguration represents an declarative configuration of the ImageStatus type for use
+// with apply.
+type ImageStatusApplyConfiguration struct {
+ InternalRegistryHostname *string `json:"internalRegistryHostname,omitempty"`
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+}
+
+// ImageStatusApplyConfiguration constructs an declarative configuration of the ImageStatus type for use with
+// apply.
+func ImageStatus() *ImageStatusApplyConfiguration {
+ return &ImageStatusApplyConfiguration{}
+}
+
+// WithInternalRegistryHostname sets the InternalRegistryHostname field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InternalRegistryHostname field is set to the value of the last call.
+func (b *ImageStatusApplyConfiguration) WithInternalRegistryHostname(value string) *ImageStatusApplyConfiguration {
+ b.InternalRegistryHostname = &value
+ return b
+}
+
+// WithExternalRegistryHostnames adds the given value to the ExternalRegistryHostnames field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExternalRegistryHostnames field.
+func (b *ImageStatusApplyConfiguration) WithExternalRegistryHostnames(values ...string) *ImageStatusApplyConfiguration {
+ for i := range values {
+ b.ExternalRegistryHostnames = append(b.ExternalRegistryHostnames, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go
new file mode 100644
index 0000000000..212d0d01ab
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrors.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ImageTagMirrorsApplyConfiguration represents an declarative configuration of the ImageTagMirrors type for use
+// with apply.
+type ImageTagMirrorsApplyConfiguration struct {
+ Source *string `json:"source,omitempty"`
+ Mirrors []v1.ImageMirror `json:"mirrors,omitempty"`
+ MirrorSourcePolicy *v1.MirrorSourcePolicy `json:"mirrorSourcePolicy,omitempty"`
+}
+
+// ImageTagMirrorsApplyConfiguration constructs an declarative configuration of the ImageTagMirrors type for use with
+// apply.
+func ImageTagMirrors() *ImageTagMirrorsApplyConfiguration {
+ return &ImageTagMirrorsApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *ImageTagMirrorsApplyConfiguration) WithSource(value string) *ImageTagMirrorsApplyConfiguration {
+ b.Source = &value
+ return b
+}
+
+// WithMirrors adds the given value to the Mirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *ImageTagMirrorsApplyConfiguration) WithMirrors(values ...v1.ImageMirror) *ImageTagMirrorsApplyConfiguration {
+ for i := range values {
+ b.Mirrors = append(b.Mirrors, values[i])
+ }
+ return b
+}
+
+// WithMirrorSourcePolicy sets the MirrorSourcePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MirrorSourcePolicy field is set to the value of the last call.
+func (b *ImageTagMirrorsApplyConfiguration) WithMirrorSourcePolicy(value v1.MirrorSourcePolicy) *ImageTagMirrorsApplyConfiguration {
+ b.MirrorSourcePolicy = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go
new file mode 100644
index 0000000000..1b85cc6007
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImageTagMirrorSetApplyConfiguration represents an declarative configuration of the ImageTagMirrorSet type for use
+// with apply.
+type ImageTagMirrorSetApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ImageTagMirrorSetSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.ImageTagMirrorSetStatus `json:"status,omitempty"`
+}
+
+// ImageTagMirrorSet constructs an declarative configuration of the ImageTagMirrorSet type for use with
+// apply.
+func ImageTagMirrorSet(name string) *ImageTagMirrorSetApplyConfiguration {
+ b := &ImageTagMirrorSetApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ImageTagMirrorSet")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractImageTagMirrorSet extracts the applied configuration owned by fieldManager from
+// imageTagMirrorSet. If no managedFields are found in imageTagMirrorSet for fieldManager, a
+// ImageTagMirrorSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// imageTagMirrorSet must be a unmodified ImageTagMirrorSet API object that was retrieved from the Kubernetes API.
+// ExtractImageTagMirrorSet provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImageTagMirrorSet(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) {
+ return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "")
+}
+
+// ExtractImageTagMirrorSetStatus is the same as ExtractImageTagMirrorSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImageTagMirrorSetStatus(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string) (*ImageTagMirrorSetApplyConfiguration, error) {
+ return extractImageTagMirrorSet(imageTagMirrorSet, fieldManager, "status")
+}
+
+func extractImageTagMirrorSet(imageTagMirrorSet *apiconfigv1.ImageTagMirrorSet, fieldManager string, subresource string) (*ImageTagMirrorSetApplyConfiguration, error) {
+ b := &ImageTagMirrorSetApplyConfiguration{}
+ err := managedfields.ExtractInto(imageTagMirrorSet, internal.Parser().Type("com.github.openshift.api.config.v1.ImageTagMirrorSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(imageTagMirrorSet.Name)
+
+ b.WithKind("ImageTagMirrorSet")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithKind(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithAPIVersion(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithName(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithGenerateName(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithNamespace(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithUID(value types.UID) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithResourceVersion(value string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithGeneration(value int64) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ImageTagMirrorSetApplyConfiguration) WithLabels(entries map[string]string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ImageTagMirrorSetApplyConfiguration) WithAnnotations(entries map[string]string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImageTagMirrorSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImageTagMirrorSetApplyConfiguration) WithFinalizers(values ...string) *ImageTagMirrorSetApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ImageTagMirrorSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithSpec(value *ImageTagMirrorSetSpecApplyConfiguration) *ImageTagMirrorSetApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value apiconfigv1.ImageTagMirrorSetStatus) *ImageTagMirrorSetApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go
new file mode 100644
index 0000000000..a6e1d9a390
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorsetspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ImageTagMirrorSetSpecApplyConfiguration represents an declarative configuration of the ImageTagMirrorSetSpec type for use
+// with apply.
+type ImageTagMirrorSetSpecApplyConfiguration struct {
+ ImageTagMirrors []ImageTagMirrorsApplyConfiguration `json:"imageTagMirrors,omitempty"`
+}
+
+// ImageTagMirrorSetSpecApplyConfiguration constructs an declarative configuration of the ImageTagMirrorSetSpec type for use with
+// apply.
+func ImageTagMirrorSetSpec() *ImageTagMirrorSetSpecApplyConfiguration {
+ return &ImageTagMirrorSetSpecApplyConfiguration{}
+}
+
+// WithImageTagMirrors adds the given value to the ImageTagMirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageTagMirrors field.
+func (b *ImageTagMirrorSetSpecApplyConfiguration) WithImageTagMirrors(values ...*ImageTagMirrorsApplyConfiguration) *ImageTagMirrorSetSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithImageTagMirrors")
+ }
+ b.ImageTagMirrors = append(b.ImageTagMirrors, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go
new file mode 100644
index 0000000000..9a1fe21bd7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// InfrastructureApplyConfiguration represents an declarative configuration of the Infrastructure type for use
+// with apply.
+type InfrastructureApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *InfrastructureSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *InfrastructureStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Infrastructure constructs an declarative configuration of the Infrastructure type for use with
+// apply.
+func Infrastructure(name string) *InfrastructureApplyConfiguration {
+ b := &InfrastructureApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Infrastructure")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractInfrastructure extracts the applied configuration owned by fieldManager from
+// infrastructure. If no managedFields are found in infrastructure for fieldManager, a
+// InfrastructureApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// infrastructure must be a unmodified Infrastructure API object that was retrieved from the Kubernetes API.
+// ExtractInfrastructure provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractInfrastructure(infrastructure *apiconfigv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) {
+ return extractInfrastructure(infrastructure, fieldManager, "")
+}
+
+// ExtractInfrastructureStatus is the same as ExtractInfrastructure except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractInfrastructureStatus(infrastructure *apiconfigv1.Infrastructure, fieldManager string) (*InfrastructureApplyConfiguration, error) {
+ return extractInfrastructure(infrastructure, fieldManager, "status")
+}
+
+func extractInfrastructure(infrastructure *apiconfigv1.Infrastructure, fieldManager string, subresource string) (*InfrastructureApplyConfiguration, error) {
+ b := &InfrastructureApplyConfiguration{}
+ err := managedfields.ExtractInto(infrastructure, internal.Parser().Type("com.github.openshift.api.config.v1.Infrastructure"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(infrastructure.Name)
+
+ b.WithKind("Infrastructure")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithKind(value string) *InfrastructureApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithAPIVersion(value string) *InfrastructureApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithName(value string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithGenerateName(value string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithNamespace(value string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithUID(value types.UID) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithResourceVersion(value string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithGeneration(value int64) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *InfrastructureApplyConfiguration) WithLabels(entries map[string]string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *InfrastructureApplyConfiguration) WithAnnotations(entries map[string]string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *InfrastructureApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *InfrastructureApplyConfiguration) WithFinalizers(values ...string) *InfrastructureApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *InfrastructureApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithSpec(value *InfrastructureSpecApplyConfiguration) *InfrastructureApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *InfrastructureApplyConfiguration) WithStatus(value *InfrastructureStatusApplyConfiguration) *InfrastructureApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go
new file mode 100644
index 0000000000..eb2f1636dc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurespec.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// InfrastructureSpecApplyConfiguration represents an declarative configuration of the InfrastructureSpec type for use
+// with apply.
+type InfrastructureSpecApplyConfiguration struct {
+ CloudConfig *ConfigMapFileReferenceApplyConfiguration `json:"cloudConfig,omitempty"`
+ PlatformSpec *PlatformSpecApplyConfiguration `json:"platformSpec,omitempty"`
+}
+
+// InfrastructureSpecApplyConfiguration constructs an declarative configuration of the InfrastructureSpec type for use with
+// apply.
+func InfrastructureSpec() *InfrastructureSpecApplyConfiguration {
+ return &InfrastructureSpecApplyConfiguration{}
+}
+
+// WithCloudConfig sets the CloudConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudConfig field is set to the value of the last call.
+func (b *InfrastructureSpecApplyConfiguration) WithCloudConfig(value *ConfigMapFileReferenceApplyConfiguration) *InfrastructureSpecApplyConfiguration {
+ b.CloudConfig = value
+ return b
+}
+
+// WithPlatformSpec sets the PlatformSpec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PlatformSpec field is set to the value of the last call.
+func (b *InfrastructureSpecApplyConfiguration) WithPlatformSpec(value *PlatformSpecApplyConfiguration) *InfrastructureSpecApplyConfiguration {
+ b.PlatformSpec = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go
new file mode 100644
index 0000000000..0f45b5562d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go
@@ -0,0 +1,99 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// InfrastructureStatusApplyConfiguration represents an declarative configuration of the InfrastructureStatus type for use
+// with apply.
+type InfrastructureStatusApplyConfiguration struct {
+ InfrastructureName *string `json:"infrastructureName,omitempty"`
+ Platform *v1.PlatformType `json:"platform,omitempty"`
+ PlatformStatus *PlatformStatusApplyConfiguration `json:"platformStatus,omitempty"`
+ EtcdDiscoveryDomain *string `json:"etcdDiscoveryDomain,omitempty"`
+ APIServerURL *string `json:"apiServerURL,omitempty"`
+ APIServerInternalURL *string `json:"apiServerInternalURI,omitempty"`
+ ControlPlaneTopology *v1.TopologyMode `json:"controlPlaneTopology,omitempty"`
+ InfrastructureTopology *v1.TopologyMode `json:"infrastructureTopology,omitempty"`
+ CPUPartitioning *v1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"`
+}
+
+// InfrastructureStatusApplyConfiguration constructs an declarative configuration of the InfrastructureStatus type for use with
+// apply.
+func InfrastructureStatus() *InfrastructureStatusApplyConfiguration {
+ return &InfrastructureStatusApplyConfiguration{}
+}
+
+// WithInfrastructureName sets the InfrastructureName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InfrastructureName field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureName(value string) *InfrastructureStatusApplyConfiguration {
+ b.InfrastructureName = &value
+ return b
+}
+
+// WithPlatform sets the Platform field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Platform field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithPlatform(value v1.PlatformType) *InfrastructureStatusApplyConfiguration {
+ b.Platform = &value
+ return b
+}
+
+// WithPlatformStatus sets the PlatformStatus field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PlatformStatus field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithPlatformStatus(value *PlatformStatusApplyConfiguration) *InfrastructureStatusApplyConfiguration {
+ b.PlatformStatus = value
+ return b
+}
+
+// WithEtcdDiscoveryDomain sets the EtcdDiscoveryDomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the EtcdDiscoveryDomain field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithEtcdDiscoveryDomain(value string) *InfrastructureStatusApplyConfiguration {
+ b.EtcdDiscoveryDomain = &value
+ return b
+}
+
+// WithAPIServerURL sets the APIServerURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerURL field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithAPIServerURL(value string) *InfrastructureStatusApplyConfiguration {
+ b.APIServerURL = &value
+ return b
+}
+
+// WithAPIServerInternalURL sets the APIServerInternalURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalURL field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithAPIServerInternalURL(value string) *InfrastructureStatusApplyConfiguration {
+ b.APIServerInternalURL = &value
+ return b
+}
+
+// WithControlPlaneTopology sets the ControlPlaneTopology field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ControlPlaneTopology field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithControlPlaneTopology(value v1.TopologyMode) *InfrastructureStatusApplyConfiguration {
+ b.ControlPlaneTopology = &value
+ return b
+}
+
+// WithInfrastructureTopology sets the InfrastructureTopology field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the InfrastructureTopology field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(value v1.TopologyMode) *InfrastructureStatusApplyConfiguration {
+ b.InfrastructureTopology = &value
+ return b
+}
+
+// WithCPUPartitioning sets the CPUPartitioning field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CPUPartitioning field is set to the value of the last call.
+func (b *InfrastructureStatusApplyConfiguration) WithCPUPartitioning(value v1.CPUPartitioningMode) *InfrastructureStatusApplyConfiguration {
+ b.CPUPartitioning = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go
new file mode 100644
index 0000000000..c3fb8b814b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use
+// with apply.
+type IngressApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Ingress constructs an declarative configuration of the Ingress type for use with
+// apply.
+func Ingress(name string) *IngressApplyConfiguration {
+ b := &IngressApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Ingress")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractIngress extracts the applied configuration owned by fieldManager from
+// ingress. If no managedFields are found in ingress for fieldManager, a
+// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API.
+// ExtractIngress provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractIngress(ingress *apiconfigv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "")
+}
+
+// ExtractIngressStatus is the same as ExtractIngress except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressStatus(ingress *apiconfigv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "status")
+}
+
+func extractIngress(ingress *apiconfigv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
+ b := &IngressApplyConfiguration{}
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("com.github.openshift.api.config.v1.Ingress"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(ingress.Name)
+
+ b.WithKind("Ingress")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithSpec(value *IngressSpecApplyConfiguration) *IngressApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfiguration) *IngressApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go
new file mode 100644
index 0000000000..ae1b18fd33
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressplatformspec.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// IngressPlatformSpecApplyConfiguration represents an declarative configuration of the IngressPlatformSpec type for use
+// with apply.
+type IngressPlatformSpecApplyConfiguration struct {
+ Type *v1.PlatformType `json:"type,omitempty"`
+ AWS *AWSIngressSpecApplyConfiguration `json:"aws,omitempty"`
+}
+
+// IngressPlatformSpecApplyConfiguration constructs an declarative configuration of the IngressPlatformSpec type for use with
+// apply.
+func IngressPlatformSpec() *IngressPlatformSpecApplyConfiguration {
+ return &IngressPlatformSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *IngressPlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *IngressPlatformSpecApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithAWS sets the AWS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AWS field is set to the value of the last call.
+func (b *IngressPlatformSpecApplyConfiguration) WithAWS(value *AWSIngressSpecApplyConfiguration) *IngressPlatformSpecApplyConfiguration {
+ b.AWS = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go
new file mode 100644
index 0000000000..d934e664be
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go
@@ -0,0 +1,69 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use
+// with apply.
+type IngressSpecApplyConfiguration struct {
+ Domain *string `json:"domain,omitempty"`
+ AppsDomain *string `json:"appsDomain,omitempty"`
+ ComponentRoutes []ComponentRouteSpecApplyConfiguration `json:"componentRoutes,omitempty"`
+ RequiredHSTSPolicies []RequiredHSTSPolicyApplyConfiguration `json:"requiredHSTSPolicies,omitempty"`
+ LoadBalancer *LoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+}
+
+// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with
+// apply.
+func IngressSpec() *IngressSpecApplyConfiguration {
+ return &IngressSpecApplyConfiguration{}
+}
+
+// WithDomain sets the Domain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Domain field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithDomain(value string) *IngressSpecApplyConfiguration {
+ b.Domain = &value
+ return b
+}
+
+// WithAppsDomain sets the AppsDomain field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AppsDomain field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithAppsDomain(value string) *IngressSpecApplyConfiguration {
+ b.AppsDomain = &value
+ return b
+}
+
+// WithComponentRoutes adds the given value to the ComponentRoutes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ComponentRoutes field.
+func (b *IngressSpecApplyConfiguration) WithComponentRoutes(values ...*ComponentRouteSpecApplyConfiguration) *IngressSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithComponentRoutes")
+ }
+ b.ComponentRoutes = append(b.ComponentRoutes, *values[i])
+ }
+ return b
+}
+
+// WithRequiredHSTSPolicies adds the given value to the RequiredHSTSPolicies field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RequiredHSTSPolicies field.
+func (b *IngressSpecApplyConfiguration) WithRequiredHSTSPolicies(values ...*RequiredHSTSPolicyApplyConfiguration) *IngressSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithRequiredHSTSPolicies")
+ }
+ b.RequiredHSTSPolicies = append(b.RequiredHSTSPolicies, *values[i])
+ }
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *IngressSpecApplyConfiguration) WithLoadBalancer(value *LoadBalancerApplyConfiguration) *IngressSpecApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go
new file mode 100644
index 0000000000..7fb9917afe
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressstatus.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use
+// with apply.
+type IngressStatusApplyConfiguration struct {
+ ComponentRoutes []ComponentRouteStatusApplyConfiguration `json:"componentRoutes,omitempty"`
+ DefaultPlacement *configv1.DefaultPlacement `json:"defaultPlacement,omitempty"`
+}
+
+// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with
+// apply.
+func IngressStatus() *IngressStatusApplyConfiguration {
+ return &IngressStatusApplyConfiguration{}
+}
+
+// WithComponentRoutes adds the given value to the ComponentRoutes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ComponentRoutes field.
+func (b *IngressStatusApplyConfiguration) WithComponentRoutes(values ...*ComponentRouteStatusApplyConfiguration) *IngressStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithComponentRoutes")
+ }
+ b.ComponentRoutes = append(b.ComponentRoutes, *values[i])
+ }
+ return b
+}
+
+// WithDefaultPlacement sets the DefaultPlacement field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DefaultPlacement field is set to the value of the last call.
+func (b *IngressStatusApplyConfiguration) WithDefaultPlacement(value configv1.DefaultPlacement) *IngressStatusApplyConfiguration {
+ b.DefaultPlacement = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go
new file mode 100644
index 0000000000..4f4ddd3752
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/keystoneidentityprovider.go
@@ -0,0 +1,56 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// KeystoneIdentityProviderApplyConfiguration represents an declarative configuration of the KeystoneIdentityProvider type for use
+// with apply.
+type KeystoneIdentityProviderApplyConfiguration struct {
+ OAuthRemoteConnectionInfoApplyConfiguration `json:",inline"`
+ DomainName *string `json:"domainName,omitempty"`
+}
+
+// KeystoneIdentityProviderApplyConfiguration constructs an declarative configuration of the KeystoneIdentityProvider type for use with
+// apply.
+func KeystoneIdentityProvider() *KeystoneIdentityProviderApplyConfiguration {
+ return &KeystoneIdentityProviderApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithURL(value string) *KeystoneIdentityProviderApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
+
+// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientCert field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration {
+ b.TLSClientCert = value
+ return b
+}
+
+// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientKey field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *KeystoneIdentityProviderApplyConfiguration {
+ b.TLSClientKey = value
+ return b
+}
+
+// WithDomainName sets the DomainName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DomainName field is set to the value of the last call.
+func (b *KeystoneIdentityProviderApplyConfiguration) WithDomainName(value string) *KeystoneIdentityProviderApplyConfiguration {
+ b.DomainName = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go
new file mode 100644
index 0000000000..8e092abd2e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kubevirtplatformstatus.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// KubevirtPlatformStatusApplyConfiguration represents an declarative configuration of the KubevirtPlatformStatus type for use
+// with apply.
+type KubevirtPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+}
+
+// KubevirtPlatformStatusApplyConfiguration constructs an declarative configuration of the KubevirtPlatformStatus type for use with
+// apply.
+func KubevirtPlatformStatus() *KubevirtPlatformStatusApplyConfiguration {
+ return &KubevirtPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *KubevirtPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *KubevirtPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *KubevirtPlatformStatusApplyConfiguration) WithIngressIP(value string) *KubevirtPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go
new file mode 100644
index 0000000000..34a8916bea
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapattributemapping.go
@@ -0,0 +1,58 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// LDAPAttributeMappingApplyConfiguration represents an declarative configuration of the LDAPAttributeMapping type for use
+// with apply.
+type LDAPAttributeMappingApplyConfiguration struct {
+ ID []string `json:"id,omitempty"`
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+ Name []string `json:"name,omitempty"`
+ Email []string `json:"email,omitempty"`
+}
+
+// LDAPAttributeMappingApplyConfiguration constructs an declarative configuration of the LDAPAttributeMapping type for use with
+// apply.
+func LDAPAttributeMapping() *LDAPAttributeMappingApplyConfiguration {
+ return &LDAPAttributeMappingApplyConfiguration{}
+}
+
+// WithID adds the given value to the ID field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ID field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithID(values ...string) *LDAPAttributeMappingApplyConfiguration {
+ for i := range values {
+ b.ID = append(b.ID, values[i])
+ }
+ return b
+}
+
+// WithPreferredUsername adds the given value to the PreferredUsername field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PreferredUsername field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithPreferredUsername(values ...string) *LDAPAttributeMappingApplyConfiguration {
+ for i := range values {
+ b.PreferredUsername = append(b.PreferredUsername, values[i])
+ }
+ return b
+}
+
+// WithName adds the given value to the Name field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Name field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithName(values ...string) *LDAPAttributeMappingApplyConfiguration {
+ for i := range values {
+ b.Name = append(b.Name, values[i])
+ }
+ return b
+}
+
+// WithEmail adds the given value to the Email field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Email field.
+func (b *LDAPAttributeMappingApplyConfiguration) WithEmail(values ...string) *LDAPAttributeMappingApplyConfiguration {
+ for i := range values {
+ b.Email = append(b.Email, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go
new file mode 100644
index 0000000000..9ab1b90ef4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ldapidentityprovider.go
@@ -0,0 +1,68 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// LDAPIdentityProviderApplyConfiguration represents an declarative configuration of the LDAPIdentityProvider type for use
+// with apply.
+type LDAPIdentityProviderApplyConfiguration struct {
+ URL *string `json:"url,omitempty"`
+ BindDN *string `json:"bindDN,omitempty"`
+ BindPassword *SecretNameReferenceApplyConfiguration `json:"bindPassword,omitempty"`
+ Insecure *bool `json:"insecure,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+ Attributes *LDAPAttributeMappingApplyConfiguration `json:"attributes,omitempty"`
+}
+
+// LDAPIdentityProviderApplyConfiguration constructs an declarative configuration of the LDAPIdentityProvider type for use with
+// apply.
+func LDAPIdentityProvider() *LDAPIdentityProviderApplyConfiguration {
+ return &LDAPIdentityProviderApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithURL(value string) *LDAPIdentityProviderApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithBindDN sets the BindDN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BindDN field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithBindDN(value string) *LDAPIdentityProviderApplyConfiguration {
+ b.BindDN = &value
+ return b
+}
+
+// WithBindPassword sets the BindPassword field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BindPassword field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithBindPassword(value *SecretNameReferenceApplyConfiguration) *LDAPIdentityProviderApplyConfiguration {
+ b.BindPassword = value
+ return b
+}
+
+// WithInsecure sets the Insecure field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Insecure field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithInsecure(value bool) *LDAPIdentityProviderApplyConfiguration {
+ b.Insecure = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *LDAPIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
+
+// WithAttributes sets the Attributes field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Attributes field is set to the value of the last call.
+func (b *LDAPIdentityProviderApplyConfiguration) WithAttributes(value *LDAPAttributeMappingApplyConfiguration) *LDAPIdentityProviderApplyConfiguration {
+ b.Attributes = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go
new file mode 100644
index 0000000000..6f8618760b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/loadbalancer.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// LoadBalancerApplyConfiguration represents an declarative configuration of the LoadBalancer type for use
+// with apply.
+type LoadBalancerApplyConfiguration struct {
+ Platform *IngressPlatformSpecApplyConfiguration `json:"platform,omitempty"`
+}
+
+// LoadBalancerApplyConfiguration constructs an declarative configuration of the LoadBalancer type for use with
+// apply.
+func LoadBalancer() *LoadBalancerApplyConfiguration {
+ return &LoadBalancerApplyConfiguration{}
+}
+
+// WithPlatform sets the Platform field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Platform field is set to the value of the last call.
+func (b *LoadBalancerApplyConfiguration) WithPlatform(value *IngressPlatformSpecApplyConfiguration) *LoadBalancerApplyConfiguration {
+ b.Platform = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go
new file mode 100644
index 0000000000..0712a0da7a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/maxagepolicy.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// MaxAgePolicyApplyConfiguration represents an declarative configuration of the MaxAgePolicy type for use
+// with apply.
+type MaxAgePolicyApplyConfiguration struct {
+ LargestMaxAge *int32 `json:"largestMaxAge,omitempty"`
+ SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"`
+}
+
+// MaxAgePolicyApplyConfiguration constructs an declarative configuration of the MaxAgePolicy type for use with
+// apply.
+func MaxAgePolicy() *MaxAgePolicyApplyConfiguration {
+ return &MaxAgePolicyApplyConfiguration{}
+}
+
+// WithLargestMaxAge sets the LargestMaxAge field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LargestMaxAge field is set to the value of the last call.
+func (b *MaxAgePolicyApplyConfiguration) WithLargestMaxAge(value int32) *MaxAgePolicyApplyConfiguration {
+ b.LargestMaxAge = &value
+ return b
+}
+
+// WithSmallestMaxAge sets the SmallestMaxAge field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SmallestMaxAge field is set to the value of the last call.
+func (b *MaxAgePolicyApplyConfiguration) WithSmallestMaxAge(value int32) *MaxAgePolicyApplyConfiguration {
+ b.SmallestMaxAge = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go
new file mode 100644
index 0000000000..23b85cf826
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigration.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// MTUMigrationApplyConfiguration represents an declarative configuration of the MTUMigration type for use
+// with apply.
+type MTUMigrationApplyConfiguration struct {
+ Network *MTUMigrationValuesApplyConfiguration `json:"network,omitempty"`
+ Machine *MTUMigrationValuesApplyConfiguration `json:"machine,omitempty"`
+}
+
+// MTUMigrationApplyConfiguration constructs an declarative configuration of the MTUMigration type for use with
+// apply.
+func MTUMigration() *MTUMigrationApplyConfiguration {
+ return &MTUMigrationApplyConfiguration{}
+}
+
+// WithNetwork sets the Network field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Network field is set to the value of the last call.
+func (b *MTUMigrationApplyConfiguration) WithNetwork(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration {
+ b.Network = value
+ return b
+}
+
+// WithMachine sets the Machine field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Machine field is set to the value of the last call.
+func (b *MTUMigrationApplyConfiguration) WithMachine(value *MTUMigrationValuesApplyConfiguration) *MTUMigrationApplyConfiguration {
+ b.Machine = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go
new file mode 100644
index 0000000000..a1e185a55c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/mtumigrationvalues.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// MTUMigrationValuesApplyConfiguration represents an declarative configuration of the MTUMigrationValues type for use
+// with apply.
+type MTUMigrationValuesApplyConfiguration struct {
+ To *uint32 `json:"to,omitempty"`
+ From *uint32 `json:"from,omitempty"`
+}
+
+// MTUMigrationValuesApplyConfiguration constructs an declarative configuration of the MTUMigrationValues type for use with
+// apply.
+func MTUMigrationValues() *MTUMigrationValuesApplyConfiguration {
+ return &MTUMigrationValuesApplyConfiguration{}
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *MTUMigrationValuesApplyConfiguration) WithTo(value uint32) *MTUMigrationValuesApplyConfiguration {
+ b.To = &value
+ return b
+}
+
+// WithFrom sets the From field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the From field is set to the value of the last call.
+func (b *MTUMigrationValuesApplyConfiguration) WithFrom(value uint32) *MTUMigrationValuesApplyConfiguration {
+ b.From = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go
new file mode 100644
index 0000000000..6604b627ff
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NetworkApplyConfiguration represents an declarative configuration of the Network type for use
+// with apply.
+type NetworkApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *NetworkSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *NetworkStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Network constructs an declarative configuration of the Network type for use with
+// apply.
+func Network(name string) *NetworkApplyConfiguration {
+ b := &NetworkApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Network")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractNetwork extracts the applied configuration owned by fieldManager from
+// network. If no managedFields are found in network for fieldManager, a
+// NetworkApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// network must be a unmodified Network API object that was retrieved from the Kubernetes API.
+// ExtractNetwork provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractNetwork(network *apiconfigv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) {
+ return extractNetwork(network, fieldManager, "")
+}
+
+// ExtractNetworkStatus is the same as ExtractNetwork except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNetworkStatus(network *apiconfigv1.Network, fieldManager string) (*NetworkApplyConfiguration, error) {
+ return extractNetwork(network, fieldManager, "status")
+}
+
+func extractNetwork(network *apiconfigv1.Network, fieldManager string, subresource string) (*NetworkApplyConfiguration, error) {
+ b := &NetworkApplyConfiguration{}
+ err := managedfields.ExtractInto(network, internal.Parser().Type("com.github.openshift.api.config.v1.Network"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(network.Name)
+
+ b.WithKind("Network")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithKind(value string) *NetworkApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithAPIVersion(value string) *NetworkApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithName(value string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithGenerateName(value string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithNamespace(value string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithUID(value types.UID) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithResourceVersion(value string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithGeneration(value int64) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *NetworkApplyConfiguration) WithLabels(entries map[string]string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *NetworkApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NetworkApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NetworkApplyConfiguration) WithFinalizers(values ...string) *NetworkApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *NetworkApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithSpec(value *NetworkSpecApplyConfiguration) *NetworkApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfiguration) *NetworkApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go
new file mode 100644
index 0000000000..93866293e3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnostics.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// NetworkDiagnosticsApplyConfiguration represents an declarative configuration of the NetworkDiagnostics type for use
+// with apply.
+type NetworkDiagnosticsApplyConfiguration struct {
+ Mode *v1.NetworkDiagnosticsMode `json:"mode,omitempty"`
+ SourcePlacement *NetworkDiagnosticsSourcePlacementApplyConfiguration `json:"sourcePlacement,omitempty"`
+ TargetPlacement *NetworkDiagnosticsTargetPlacementApplyConfiguration `json:"targetPlacement,omitempty"`
+}
+
+// NetworkDiagnosticsApplyConfiguration constructs an declarative configuration of the NetworkDiagnostics type for use with
+// apply.
+func NetworkDiagnostics() *NetworkDiagnosticsApplyConfiguration {
+ return &NetworkDiagnosticsApplyConfiguration{}
+}
+
+// WithMode sets the Mode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Mode field is set to the value of the last call.
+func (b *NetworkDiagnosticsApplyConfiguration) WithMode(value v1.NetworkDiagnosticsMode) *NetworkDiagnosticsApplyConfiguration {
+ b.Mode = &value
+ return b
+}
+
+// WithSourcePlacement sets the SourcePlacement field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SourcePlacement field is set to the value of the last call.
+func (b *NetworkDiagnosticsApplyConfiguration) WithSourcePlacement(value *NetworkDiagnosticsSourcePlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration {
+ b.SourcePlacement = value
+ return b
+}
+
+// WithTargetPlacement sets the TargetPlacement field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TargetPlacement field is set to the value of the last call.
+func (b *NetworkDiagnosticsApplyConfiguration) WithTargetPlacement(value *NetworkDiagnosticsTargetPlacementApplyConfiguration) *NetworkDiagnosticsApplyConfiguration {
+ b.TargetPlacement = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go
new file mode 100644
index 0000000000..efe6bbd494
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticssourceplacement.go
@@ -0,0 +1,44 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "k8s.io/api/core/v1"
+)
+
+// NetworkDiagnosticsSourcePlacementApplyConfiguration represents an declarative configuration of the NetworkDiagnosticsSourcePlacement type for use
+// with apply.
+type NetworkDiagnosticsSourcePlacementApplyConfiguration struct {
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+}
+
+// NetworkDiagnosticsSourcePlacementApplyConfiguration constructs an declarative configuration of the NetworkDiagnosticsSourcePlacement type for use with
+// apply.
+func NetworkDiagnosticsSourcePlacement() *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+ return &NetworkDiagnosticsSourcePlacementApplyConfiguration{}
+}
+
+// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field,
+// overwriting an existing map entries in NodeSelector field with the same key.
+func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+ if b.NodeSelector == nil && len(entries) > 0 {
+ b.NodeSelector = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.NodeSelector[k] = v
+ }
+ return b
+}
+
+// WithTolerations adds the given value to the Tolerations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tolerations field.
+func (b *NetworkDiagnosticsSourcePlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsSourcePlacementApplyConfiguration {
+ for i := range values {
+ b.Tolerations = append(b.Tolerations, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go
new file mode 100644
index 0000000000..c1ce2d8e9d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkdiagnosticstargetplacement.go
@@ -0,0 +1,44 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "k8s.io/api/core/v1"
+)
+
+// NetworkDiagnosticsTargetPlacementApplyConfiguration represents an declarative configuration of the NetworkDiagnosticsTargetPlacement type for use
+// with apply.
+type NetworkDiagnosticsTargetPlacementApplyConfiguration struct {
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+}
+
+// NetworkDiagnosticsTargetPlacementApplyConfiguration constructs an declarative configuration of the NetworkDiagnosticsTargetPlacement type for use with
+// apply.
+func NetworkDiagnosticsTargetPlacement() *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+ return &NetworkDiagnosticsTargetPlacementApplyConfiguration{}
+}
+
+// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field,
+// overwriting an existing map entries in NodeSelector field with the same key.
+func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithNodeSelector(entries map[string]string) *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+ if b.NodeSelector == nil && len(entries) > 0 {
+ b.NodeSelector = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.NodeSelector[k] = v
+ }
+ return b
+}
+
+// WithTolerations adds the given value to the Tolerations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Tolerations field.
+func (b *NetworkDiagnosticsTargetPlacementApplyConfiguration) WithTolerations(values ...v1.Toleration) *NetworkDiagnosticsTargetPlacementApplyConfiguration {
+ for i := range values {
+ b.Tolerations = append(b.Tolerations, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go
new file mode 100644
index 0000000000..c1ea6eade8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkmigration.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NetworkMigrationApplyConfiguration represents an declarative configuration of the NetworkMigration type for use
+// with apply.
+type NetworkMigrationApplyConfiguration struct {
+ NetworkType *string `json:"networkType,omitempty"`
+ MTU *MTUMigrationApplyConfiguration `json:"mtu,omitempty"`
+}
+
+// NetworkMigrationApplyConfiguration constructs an declarative configuration of the NetworkMigration type for use with
+// apply.
+func NetworkMigration() *NetworkMigrationApplyConfiguration {
+ return &NetworkMigrationApplyConfiguration{}
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkMigrationApplyConfiguration) WithNetworkType(value string) *NetworkMigrationApplyConfiguration {
+ b.NetworkType = &value
+ return b
+}
+
+// WithMTU sets the MTU field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MTU field is set to the value of the last call.
+func (b *NetworkMigrationApplyConfiguration) WithMTU(value *MTUMigrationApplyConfiguration) *NetworkMigrationApplyConfiguration {
+ b.MTU = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go
new file mode 100644
index 0000000000..5d218d02c2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkspec.go
@@ -0,0 +1,75 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NetworkSpecApplyConfiguration represents an declarative configuration of the NetworkSpec type for use
+// with apply.
+type NetworkSpecApplyConfiguration struct {
+ ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"`
+ ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+ NetworkType *string `json:"networkType,omitempty"`
+ ExternalIP *ExternalIPConfigApplyConfiguration `json:"externalIP,omitempty"`
+ ServiceNodePortRange *string `json:"serviceNodePortRange,omitempty"`
+ NetworkDiagnostics *NetworkDiagnosticsApplyConfiguration `json:"networkDiagnostics,omitempty"`
+}
+
+// NetworkSpecApplyConfiguration constructs an declarative configuration of the NetworkSpec type for use with
+// apply.
+func NetworkSpec() *NetworkSpecApplyConfiguration {
+ return &NetworkSpecApplyConfiguration{}
+}
+
+// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClusterNetwork field.
+func (b *NetworkSpecApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *NetworkSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithClusterNetwork")
+ }
+ b.ClusterNetwork = append(b.ClusterNetwork, *values[i])
+ }
+ return b
+}
+
+// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceNetwork field.
+func (b *NetworkSpecApplyConfiguration) WithServiceNetwork(values ...string) *NetworkSpecApplyConfiguration {
+ for i := range values {
+ b.ServiceNetwork = append(b.ServiceNetwork, values[i])
+ }
+ return b
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithNetworkType(value string) *NetworkSpecApplyConfiguration {
+ b.NetworkType = &value
+ return b
+}
+
+// WithExternalIP sets the ExternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExternalIP field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithExternalIP(value *ExternalIPConfigApplyConfiguration) *NetworkSpecApplyConfiguration {
+ b.ExternalIP = value
+ return b
+}
+
+// WithServiceNodePortRange sets the ServiceNodePortRange field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceNodePortRange field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithServiceNodePortRange(value string) *NetworkSpecApplyConfiguration {
+ b.ServiceNodePortRange = &value
+ return b
+}
+
+// WithNetworkDiagnostics sets the NetworkDiagnostics field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkDiagnostics field is set to the value of the last call.
+func (b *NetworkSpecApplyConfiguration) WithNetworkDiagnostics(value *NetworkDiagnosticsApplyConfiguration) *NetworkSpecApplyConfiguration {
+ b.NetworkDiagnostics = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go
new file mode 100644
index 0000000000..a2994e8112
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/networkstatus.go
@@ -0,0 +1,84 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NetworkStatusApplyConfiguration represents an declarative configuration of the NetworkStatus type for use
+// with apply.
+type NetworkStatusApplyConfiguration struct {
+ ClusterNetwork []ClusterNetworkEntryApplyConfiguration `json:"clusterNetwork,omitempty"`
+ ServiceNetwork []string `json:"serviceNetwork,omitempty"`
+ NetworkType *string `json:"networkType,omitempty"`
+ ClusterNetworkMTU *int `json:"clusterNetworkMTU,omitempty"`
+ Migration *NetworkMigrationApplyConfiguration `json:"migration,omitempty"`
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// NetworkStatusApplyConfiguration constructs an declarative configuration of the NetworkStatus type for use with
+// apply.
+func NetworkStatus() *NetworkStatusApplyConfiguration {
+ return &NetworkStatusApplyConfiguration{}
+}
+
+// WithClusterNetwork adds the given value to the ClusterNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClusterNetwork field.
+func (b *NetworkStatusApplyConfiguration) WithClusterNetwork(values ...*ClusterNetworkEntryApplyConfiguration) *NetworkStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithClusterNetwork")
+ }
+ b.ClusterNetwork = append(b.ClusterNetwork, *values[i])
+ }
+ return b
+}
+
+// WithServiceNetwork adds the given value to the ServiceNetwork field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceNetwork field.
+func (b *NetworkStatusApplyConfiguration) WithServiceNetwork(values ...string) *NetworkStatusApplyConfiguration {
+ for i := range values {
+ b.ServiceNetwork = append(b.ServiceNetwork, values[i])
+ }
+ return b
+}
+
+// WithNetworkType sets the NetworkType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NetworkType field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithNetworkType(value string) *NetworkStatusApplyConfiguration {
+ b.NetworkType = &value
+ return b
+}
+
+// WithClusterNetworkMTU sets the ClusterNetworkMTU field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterNetworkMTU field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithClusterNetworkMTU(value int) *NetworkStatusApplyConfiguration {
+ b.ClusterNetworkMTU = &value
+ return b
+}
+
+// WithMigration sets the Migration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Migration field is set to the value of the last call.
+func (b *NetworkStatusApplyConfiguration) WithMigration(value *NetworkMigrationApplyConfiguration) *NetworkStatusApplyConfiguration {
+ b.Migration = value
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *NetworkStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *NetworkStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go
new file mode 100644
index 0000000000..a407a9e450
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// NodeApplyConfiguration represents an declarative configuration of the Node type for use
+// with apply.
+type NodeApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.NodeStatus `json:"status,omitempty"`
+}
+
+// Node constructs an declarative configuration of the Node type for use with
+// apply.
+func Node(name string) *NodeApplyConfiguration {
+ b := &NodeApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Node")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractNode extracts the applied configuration owned by fieldManager from
+// node. If no managedFields are found in node for fieldManager, a
+// NodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// node must be a unmodified Node API object that was retrieved from the Kubernetes API.
+// ExtractNode provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractNode(node *apiconfigv1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+ return extractNode(node, fieldManager, "")
+}
+
+// ExtractNodeStatus is the same as ExtractNode except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNodeStatus(node *apiconfigv1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+ return extractNode(node, fieldManager, "status")
+}
+
+func extractNode(node *apiconfigv1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
+ b := &NodeApplyConfiguration{}
+ err := managedfields.ExtractInto(node, internal.Parser().Type("com.github.openshift.api.config.v1.Node"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(node.Name)
+
+ b.WithKind("Node")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithSpec(value *NodeSpecApplyConfiguration) *NodeApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *NodeApplyConfiguration) WithStatus(value apiconfigv1.NodeStatus) *NodeApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go
new file mode 100644
index 0000000000..3b7bf903bc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use
+// with apply.
+type NodeSpecApplyConfiguration struct {
+ CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"`
+ WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"`
+}
+
+// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with
+// apply.
+func NodeSpec() *NodeSpecApplyConfiguration {
+ return &NodeSpecApplyConfiguration{}
+}
+
+// WithCgroupMode sets the CgroupMode field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CgroupMode field is set to the value of the last call.
+func (b *NodeSpecApplyConfiguration) WithCgroupMode(value v1.CgroupMode) *NodeSpecApplyConfiguration {
+ b.CgroupMode = &value
+ return b
+}
+
+// WithWorkerLatencyProfile sets the WorkerLatencyProfile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WorkerLatencyProfile field is set to the value of the last call.
+func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value v1.WorkerLatencyProfileType) *NodeSpecApplyConfiguration {
+ b.WorkerLatencyProfile = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go
new file mode 100644
index 0000000000..5af68e441c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixfailuredomain.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixFailureDomainApplyConfiguration represents an declarative configuration of the NutanixFailureDomain type for use
+// with apply.
+type NutanixFailureDomainApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Cluster *NutanixResourceIdentifierApplyConfiguration `json:"cluster,omitempty"`
+ Subnets []NutanixResourceIdentifierApplyConfiguration `json:"subnets,omitempty"`
+}
+
+// NutanixFailureDomainApplyConfiguration constructs an declarative configuration of the NutanixFailureDomain type for use with
+// apply.
+func NutanixFailureDomain() *NutanixFailureDomainApplyConfiguration {
+ return &NutanixFailureDomainApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NutanixFailureDomainApplyConfiguration) WithName(value string) *NutanixFailureDomainApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithCluster sets the Cluster field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Cluster field is set to the value of the last call.
+func (b *NutanixFailureDomainApplyConfiguration) WithCluster(value *NutanixResourceIdentifierApplyConfiguration) *NutanixFailureDomainApplyConfiguration {
+ b.Cluster = value
+ return b
+}
+
+// WithSubnets adds the given value to the Subnets field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Subnets field.
+func (b *NutanixFailureDomainApplyConfiguration) WithSubnets(values ...*NutanixResourceIdentifierApplyConfiguration) *NutanixFailureDomainApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSubnets")
+ }
+ b.Subnets = append(b.Subnets, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go
new file mode 100644
index 0000000000..5ab68bb779
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// NutanixPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the NutanixPlatformLoadBalancer type for use
+// with apply.
+type NutanixPlatformLoadBalancerApplyConfiguration struct {
+ Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// NutanixPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the NutanixPlatformLoadBalancer type for use with
+// apply.
+func NutanixPlatformLoadBalancer() *NutanixPlatformLoadBalancerApplyConfiguration {
+ return &NutanixPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *NutanixPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *NutanixPlatformLoadBalancerApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go
new file mode 100644
index 0000000000..d36708229f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformspec.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPlatformSpecApplyConfiguration represents an declarative configuration of the NutanixPlatformSpec type for use
+// with apply.
+type NutanixPlatformSpecApplyConfiguration struct {
+ PrismCentral *NutanixPrismEndpointApplyConfiguration `json:"prismCentral,omitempty"`
+ PrismElements []NutanixPrismElementEndpointApplyConfiguration `json:"prismElements,omitempty"`
+ FailureDomains []NutanixFailureDomainApplyConfiguration `json:"failureDomains,omitempty"`
+}
+
+// NutanixPlatformSpecApplyConfiguration constructs an declarative configuration of the NutanixPlatformSpec type for use with
+// apply.
+func NutanixPlatformSpec() *NutanixPlatformSpecApplyConfiguration {
+ return &NutanixPlatformSpecApplyConfiguration{}
+}
+
+// WithPrismCentral sets the PrismCentral field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrismCentral field is set to the value of the last call.
+func (b *NutanixPlatformSpecApplyConfiguration) WithPrismCentral(value *NutanixPrismEndpointApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+ b.PrismCentral = value
+ return b
+}
+
+// WithPrismElements adds the given value to the PrismElements field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PrismElements field.
+func (b *NutanixPlatformSpecApplyConfiguration) WithPrismElements(values ...*NutanixPrismElementEndpointApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithPrismElements")
+ }
+ b.PrismElements = append(b.PrismElements, *values[i])
+ }
+ return b
+}
+
+// WithFailureDomains adds the given value to the FailureDomains field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FailureDomains field.
+func (b *NutanixPlatformSpecApplyConfiguration) WithFailureDomains(values ...*NutanixFailureDomainApplyConfiguration) *NutanixPlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithFailureDomains")
+ }
+ b.FailureDomains = append(b.FailureDomains, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go
new file mode 100644
index 0000000000..8dd8a68952
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go
@@ -0,0 +1,63 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPlatformStatusApplyConfiguration represents an declarative configuration of the NutanixPlatformStatus type for use
+// with apply.
+type NutanixPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+ IngressIPs []string `json:"ingressIPs,omitempty"`
+ LoadBalancer *NutanixPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+}
+
+// NutanixPlatformStatusApplyConfiguration constructs an declarative configuration of the NutanixPlatformStatus type for use with
+// apply.
+func NutanixPlatformStatus() *NutanixPlatformStatusApplyConfiguration {
+ return &NutanixPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *NutanixPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *NutanixPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *NutanixPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *NutanixPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *NutanixPlatformStatusApplyConfiguration) WithIngressIP(value string) *NutanixPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *NutanixPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *NutanixPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *NutanixPlatformStatusApplyConfiguration) WithLoadBalancer(value *NutanixPlatformLoadBalancerApplyConfiguration) *NutanixPlatformStatusApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go
new file mode 100644
index 0000000000..3251b5343e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismelementendpoint.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPrismElementEndpointApplyConfiguration represents an declarative configuration of the NutanixPrismElementEndpoint type for use
+// with apply.
+type NutanixPrismElementEndpointApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Endpoint *NutanixPrismEndpointApplyConfiguration `json:"endpoint,omitempty"`
+}
+
+// NutanixPrismElementEndpointApplyConfiguration constructs an declarative configuration of the NutanixPrismElementEndpoint type for use with
+// apply.
+func NutanixPrismElementEndpoint() *NutanixPrismElementEndpointApplyConfiguration {
+ return &NutanixPrismElementEndpointApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NutanixPrismElementEndpointApplyConfiguration) WithName(value string) *NutanixPrismElementEndpointApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithEndpoint sets the Endpoint field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Endpoint field is set to the value of the last call.
+func (b *NutanixPrismElementEndpointApplyConfiguration) WithEndpoint(value *NutanixPrismEndpointApplyConfiguration) *NutanixPrismElementEndpointApplyConfiguration {
+ b.Endpoint = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go
new file mode 100644
index 0000000000..a901573249
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixprismendpoint.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// NutanixPrismEndpointApplyConfiguration represents an declarative configuration of the NutanixPrismEndpoint type for use
+// with apply.
+type NutanixPrismEndpointApplyConfiguration struct {
+ Address *string `json:"address,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+}
+
+// NutanixPrismEndpointApplyConfiguration constructs an declarative configuration of the NutanixPrismEndpoint type for use with
+// apply.
+func NutanixPrismEndpoint() *NutanixPrismEndpointApplyConfiguration {
+ return &NutanixPrismEndpointApplyConfiguration{}
+}
+
+// WithAddress sets the Address field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Address field is set to the value of the last call.
+func (b *NutanixPrismEndpointApplyConfiguration) WithAddress(value string) *NutanixPrismEndpointApplyConfiguration {
+ b.Address = &value
+ return b
+}
+
+// WithPort sets the Port field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Port field is set to the value of the last call.
+func (b *NutanixPrismEndpointApplyConfiguration) WithPort(value int32) *NutanixPrismEndpointApplyConfiguration {
+ b.Port = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go
new file mode 100644
index 0000000000..cb039c42e9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixresourceidentifier.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// NutanixResourceIdentifierApplyConfiguration represents an declarative configuration of the NutanixResourceIdentifier type for use
+// with apply.
+type NutanixResourceIdentifierApplyConfiguration struct {
+ Type *v1.NutanixIdentifierType `json:"type,omitempty"`
+ UUID *string `json:"uuid,omitempty"`
+ Name *string `json:"name,omitempty"`
+}
+
+// NutanixResourceIdentifierApplyConfiguration constructs an declarative configuration of the NutanixResourceIdentifier type for use with
+// apply.
+func NutanixResourceIdentifier() *NutanixResourceIdentifierApplyConfiguration {
+ return &NutanixResourceIdentifierApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *NutanixResourceIdentifierApplyConfiguration) WithType(value v1.NutanixIdentifierType) *NutanixResourceIdentifierApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithUUID sets the UUID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UUID field is set to the value of the last call.
+func (b *NutanixResourceIdentifierApplyConfiguration) WithUUID(value string) *NutanixResourceIdentifierApplyConfiguration {
+ b.UUID = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *NutanixResourceIdentifierApplyConfiguration) WithName(value string) *NutanixResourceIdentifierApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go
new file mode 100644
index 0000000000..1067348bd8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OAuthApplyConfiguration represents an declarative configuration of the OAuth type for use
+// with apply.
+type OAuthApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *OAuthSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.OAuthStatus `json:"status,omitempty"`
+}
+
+// OAuth constructs an declarative configuration of the OAuth type for use with
+// apply.
+func OAuth(name string) *OAuthApplyConfiguration {
+ b := &OAuthApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("OAuth")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractOAuth extracts the applied configuration owned by fieldManager from
+// oAuth. If no managedFields are found in oAuth for fieldManager, a
+// OAuthApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// oAuth must be a unmodified OAuth API object that was retrieved from the Kubernetes API.
+// ExtractOAuth provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOAuth(oAuth *apiconfigv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) {
+ return extractOAuth(oAuth, fieldManager, "")
+}
+
+// ExtractOAuthStatus is the same as ExtractOAuth except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOAuthStatus(oAuth *apiconfigv1.OAuth, fieldManager string) (*OAuthApplyConfiguration, error) {
+ return extractOAuth(oAuth, fieldManager, "status")
+}
+
+func extractOAuth(oAuth *apiconfigv1.OAuth, fieldManager string, subresource string) (*OAuthApplyConfiguration, error) {
+ b := &OAuthApplyConfiguration{}
+ err := managedfields.ExtractInto(oAuth, internal.Parser().Type("com.github.openshift.api.config.v1.OAuth"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(oAuth.Name)
+
+ b.WithKind("OAuth")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithKind(value string) *OAuthApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithAPIVersion(value string) *OAuthApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithName(value string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithGenerateName(value string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithNamespace(value string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithUID(value types.UID) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithResourceVersion(value string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithGeneration(value int64) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *OAuthApplyConfiguration) WithLabels(entries map[string]string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *OAuthApplyConfiguration) WithAnnotations(entries map[string]string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OAuthApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OAuthApplyConfiguration) WithFinalizers(values ...string) *OAuthApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *OAuthApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithSpec(value *OAuthSpecApplyConfiguration) *OAuthApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *OAuthApplyConfiguration) WithStatus(value apiconfigv1.OAuthStatus) *OAuthApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go
new file mode 100644
index 0000000000..5a1cca90f7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthremoteconnectioninfo.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OAuthRemoteConnectionInfoApplyConfiguration represents an declarative configuration of the OAuthRemoteConnectionInfo type for use
+// with apply.
+type OAuthRemoteConnectionInfoApplyConfiguration struct {
+ URL *string `json:"url,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+ TLSClientCert *SecretNameReferenceApplyConfiguration `json:"tlsClientCert,omitempty"`
+ TLSClientKey *SecretNameReferenceApplyConfiguration `json:"tlsClientKey,omitempty"`
+}
+
+// OAuthRemoteConnectionInfoApplyConfiguration constructs an declarative configuration of the OAuthRemoteConnectionInfo type for use with
+// apply.
+func OAuthRemoteConnectionInfo() *OAuthRemoteConnectionInfoApplyConfiguration {
+ return &OAuthRemoteConnectionInfoApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithURL(value string) *OAuthRemoteConnectionInfoApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration {
+ b.CA = value
+ return b
+}
+
+// WithTLSClientCert sets the TLSClientCert field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientCert field is set to the value of the last call.
+func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithTLSClientCert(value *SecretNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration {
+ b.TLSClientCert = value
+ return b
+}
+
+// WithTLSClientKey sets the TLSClientKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TLSClientKey field is set to the value of the last call.
+func (b *OAuthRemoteConnectionInfoApplyConfiguration) WithTLSClientKey(value *SecretNameReferenceApplyConfiguration) *OAuthRemoteConnectionInfoApplyConfiguration {
+ b.TLSClientKey = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go
new file mode 100644
index 0000000000..3fd9878827
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthspec.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OAuthSpecApplyConfiguration represents an declarative configuration of the OAuthSpec type for use
+// with apply.
+type OAuthSpecApplyConfiguration struct {
+ IdentityProviders []IdentityProviderApplyConfiguration `json:"identityProviders,omitempty"`
+ TokenConfig *TokenConfigApplyConfiguration `json:"tokenConfig,omitempty"`
+ Templates *OAuthTemplatesApplyConfiguration `json:"templates,omitempty"`
+}
+
+// OAuthSpecApplyConfiguration constructs an declarative configuration of the OAuthSpec type for use with
+// apply.
+func OAuthSpec() *OAuthSpecApplyConfiguration {
+ return &OAuthSpecApplyConfiguration{}
+}
+
+// WithIdentityProviders adds the given value to the IdentityProviders field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IdentityProviders field.
+func (b *OAuthSpecApplyConfiguration) WithIdentityProviders(values ...*IdentityProviderApplyConfiguration) *OAuthSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithIdentityProviders")
+ }
+ b.IdentityProviders = append(b.IdentityProviders, *values[i])
+ }
+ return b
+}
+
+// WithTokenConfig sets the TokenConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TokenConfig field is set to the value of the last call.
+func (b *OAuthSpecApplyConfiguration) WithTokenConfig(value *TokenConfigApplyConfiguration) *OAuthSpecApplyConfiguration {
+ b.TokenConfig = value
+ return b
+}
+
+// WithTemplates sets the Templates field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Templates field is set to the value of the last call.
+func (b *OAuthSpecApplyConfiguration) WithTemplates(value *OAuthTemplatesApplyConfiguration) *OAuthSpecApplyConfiguration {
+ b.Templates = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go
new file mode 100644
index 0000000000..99b615e1b4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauthtemplates.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OAuthTemplatesApplyConfiguration represents an declarative configuration of the OAuthTemplates type for use
+// with apply.
+type OAuthTemplatesApplyConfiguration struct {
+ Login *SecretNameReferenceApplyConfiguration `json:"login,omitempty"`
+ ProviderSelection *SecretNameReferenceApplyConfiguration `json:"providerSelection,omitempty"`
+ Error *SecretNameReferenceApplyConfiguration `json:"error,omitempty"`
+}
+
+// OAuthTemplatesApplyConfiguration constructs an declarative configuration of the OAuthTemplates type for use with
+// apply.
+func OAuthTemplates() *OAuthTemplatesApplyConfiguration {
+ return &OAuthTemplatesApplyConfiguration{}
+}
+
+// WithLogin sets the Login field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Login field is set to the value of the last call.
+func (b *OAuthTemplatesApplyConfiguration) WithLogin(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration {
+ b.Login = value
+ return b
+}
+
+// WithProviderSelection sets the ProviderSelection field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProviderSelection field is set to the value of the last call.
+func (b *OAuthTemplatesApplyConfiguration) WithProviderSelection(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration {
+ b.ProviderSelection = value
+ return b
+}
+
+// WithError sets the Error field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Error field is set to the value of the last call.
+func (b *OAuthTemplatesApplyConfiguration) WithError(value *SecretNameReferenceApplyConfiguration) *OAuthTemplatesApplyConfiguration {
+ b.Error = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go
new file mode 100644
index 0000000000..fd46a832d2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/objectreference.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use
+// with apply.
+type ObjectReferenceApplyConfiguration struct {
+ Group *string `json:"group,omitempty"`
+ Resource *string `json:"resource,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Name *string `json:"name,omitempty"`
+}
+
+// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with
+// apply.
+func ObjectReference() *ObjectReferenceApplyConfiguration {
+ return &ObjectReferenceApplyConfiguration{}
+}
+
+// WithGroup sets the Group field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Group field is set to the value of the last call.
+func (b *ObjectReferenceApplyConfiguration) WithGroup(value string) *ObjectReferenceApplyConfiguration {
+ b.Group = &value
+ return b
+}
+
+// WithResource sets the Resource field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resource field is set to the value of the last call.
+func (b *ObjectReferenceApplyConfiguration) WithResource(value string) *ObjectReferenceApplyConfiguration {
+ b.Resource = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ObjectReferenceApplyConfiguration) WithNamespace(value string) *ObjectReferenceApplyConfiguration {
+ b.Namespace = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ObjectReferenceApplyConfiguration) WithName(value string) *ObjectReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go
new file mode 100644
index 0000000000..1a66c43aa3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientconfig.go
@@ -0,0 +1,61 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OIDCClientConfigApplyConfiguration represents an declarative configuration of the OIDCClientConfig type for use
+// with apply.
+type OIDCClientConfigApplyConfiguration struct {
+ ComponentName *string `json:"componentName,omitempty"`
+ ComponentNamespace *string `json:"componentNamespace,omitempty"`
+ ClientID *string `json:"clientID,omitempty"`
+ ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"`
+ ExtraScopes []string `json:"extraScopes,omitempty"`
+}
+
+// OIDCClientConfigApplyConfiguration constructs an declarative configuration of the OIDCClientConfig type for use with
+// apply.
+func OIDCClientConfig() *OIDCClientConfigApplyConfiguration {
+ return &OIDCClientConfigApplyConfiguration{}
+}
+
+// WithComponentName sets the ComponentName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ComponentName field is set to the value of the last call.
+func (b *OIDCClientConfigApplyConfiguration) WithComponentName(value string) *OIDCClientConfigApplyConfiguration {
+ b.ComponentName = &value
+ return b
+}
+
+// WithComponentNamespace sets the ComponentNamespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ComponentNamespace field is set to the value of the last call.
+func (b *OIDCClientConfigApplyConfiguration) WithComponentNamespace(value string) *OIDCClientConfigApplyConfiguration {
+ b.ComponentNamespace = &value
+ return b
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *OIDCClientConfigApplyConfiguration) WithClientID(value string) *OIDCClientConfigApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *OIDCClientConfigApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *OIDCClientConfigApplyConfiguration {
+ b.ClientSecret = value
+ return b
+}
+
+// WithExtraScopes adds the given value to the ExtraScopes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExtraScopes field.
+func (b *OIDCClientConfigApplyConfiguration) WithExtraScopes(values ...string) *OIDCClientConfigApplyConfiguration {
+ for i := range values {
+ b.ExtraScopes = append(b.ExtraScopes, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go
new file mode 100644
index 0000000000..3c20508e54
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientreference.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OIDCClientReferenceApplyConfiguration represents an declarative configuration of the OIDCClientReference type for use
+// with apply.
+type OIDCClientReferenceApplyConfiguration struct {
+ OIDCProviderName *string `json:"oidcProviderName,omitempty"`
+ IssuerURL *string `json:"issuerURL,omitempty"`
+ ClientID *string `json:"clientID,omitempty"`
+}
+
+// OIDCClientReferenceApplyConfiguration constructs an declarative configuration of the OIDCClientReference type for use with
+// apply.
+func OIDCClientReference() *OIDCClientReferenceApplyConfiguration {
+ return &OIDCClientReferenceApplyConfiguration{}
+}
+
+// WithOIDCProviderName sets the OIDCProviderName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OIDCProviderName field is set to the value of the last call.
+func (b *OIDCClientReferenceApplyConfiguration) WithOIDCProviderName(value string) *OIDCClientReferenceApplyConfiguration {
+ b.OIDCProviderName = &value
+ return b
+}
+
+// WithIssuerURL sets the IssuerURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IssuerURL field is set to the value of the last call.
+func (b *OIDCClientReferenceApplyConfiguration) WithIssuerURL(value string) *OIDCClientReferenceApplyConfiguration {
+ b.IssuerURL = &value
+ return b
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *OIDCClientReferenceApplyConfiguration) WithClientID(value string) *OIDCClientReferenceApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go
new file mode 100644
index 0000000000..7a57e89d06
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcclientstatus.go
@@ -0,0 +1,76 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OIDCClientStatusApplyConfiguration represents an declarative configuration of the OIDCClientStatus type for use
+// with apply.
+type OIDCClientStatusApplyConfiguration struct {
+ ComponentName *string `json:"componentName,omitempty"`
+ ComponentNamespace *string `json:"componentNamespace,omitempty"`
+ CurrentOIDCClients []OIDCClientReferenceApplyConfiguration `json:"currentOIDCClients,omitempty"`
+ ConsumingUsers []configv1.ConsumingUser `json:"consumingUsers,omitempty"`
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// OIDCClientStatusApplyConfiguration constructs an declarative configuration of the OIDCClientStatus type for use with
+// apply.
+func OIDCClientStatus() *OIDCClientStatusApplyConfiguration {
+ return &OIDCClientStatusApplyConfiguration{}
+}
+
+// WithComponentName sets the ComponentName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ComponentName field is set to the value of the last call.
+func (b *OIDCClientStatusApplyConfiguration) WithComponentName(value string) *OIDCClientStatusApplyConfiguration {
+ b.ComponentName = &value
+ return b
+}
+
+// WithComponentNamespace sets the ComponentNamespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ComponentNamespace field is set to the value of the last call.
+func (b *OIDCClientStatusApplyConfiguration) WithComponentNamespace(value string) *OIDCClientStatusApplyConfiguration {
+ b.ComponentNamespace = &value
+ return b
+}
+
+// WithCurrentOIDCClients adds the given value to the CurrentOIDCClients field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the CurrentOIDCClients field.
+func (b *OIDCClientStatusApplyConfiguration) WithCurrentOIDCClients(values ...*OIDCClientReferenceApplyConfiguration) *OIDCClientStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithCurrentOIDCClients")
+ }
+ b.CurrentOIDCClients = append(b.CurrentOIDCClients, *values[i])
+ }
+ return b
+}
+
+// WithConsumingUsers adds the given value to the ConsumingUsers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConsumingUsers field.
+func (b *OIDCClientStatusApplyConfiguration) WithConsumingUsers(values ...configv1.ConsumingUser) *OIDCClientStatusApplyConfiguration {
+ for i := range values {
+ b.ConsumingUsers = append(b.ConsumingUsers, values[i])
+ }
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *OIDCClientStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *OIDCClientStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go
new file mode 100644
index 0000000000..d700ea5e15
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oidcprovider.go
@@ -0,0 +1,69 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OIDCProviderApplyConfiguration represents an declarative configuration of the OIDCProvider type for use
+// with apply.
+type OIDCProviderApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Issuer *TokenIssuerApplyConfiguration `json:"issuer,omitempty"`
+ OIDCClients []OIDCClientConfigApplyConfiguration `json:"oidcClients,omitempty"`
+ ClaimMappings *TokenClaimMappingsApplyConfiguration `json:"claimMappings,omitempty"`
+ ClaimValidationRules []TokenClaimValidationRuleApplyConfiguration `json:"claimValidationRules,omitempty"`
+}
+
+// OIDCProviderApplyConfiguration constructs an declarative configuration of the OIDCProvider type for use with
+// apply.
+func OIDCProvider() *OIDCProviderApplyConfiguration {
+ return &OIDCProviderApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *OIDCProviderApplyConfiguration) WithName(value string) *OIDCProviderApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithIssuer sets the Issuer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Issuer field is set to the value of the last call.
+func (b *OIDCProviderApplyConfiguration) WithIssuer(value *TokenIssuerApplyConfiguration) *OIDCProviderApplyConfiguration {
+ b.Issuer = value
+ return b
+}
+
+// WithOIDCClients adds the given value to the OIDCClients field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OIDCClients field.
+func (b *OIDCProviderApplyConfiguration) WithOIDCClients(values ...*OIDCClientConfigApplyConfiguration) *OIDCProviderApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOIDCClients")
+ }
+ b.OIDCClients = append(b.OIDCClients, *values[i])
+ }
+ return b
+}
+
+// WithClaimMappings sets the ClaimMappings field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClaimMappings field is set to the value of the last call.
+func (b *OIDCProviderApplyConfiguration) WithClaimMappings(value *TokenClaimMappingsApplyConfiguration) *OIDCProviderApplyConfiguration {
+ b.ClaimMappings = value
+ return b
+}
+
+// WithClaimValidationRules adds the given value to the ClaimValidationRules field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClaimValidationRules field.
+func (b *OIDCProviderApplyConfiguration) WithClaimValidationRules(values ...*TokenClaimValidationRuleApplyConfiguration) *OIDCProviderApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithClaimValidationRules")
+ }
+ b.ClaimValidationRules = append(b.ClaimValidationRules, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go
new file mode 100644
index 0000000000..ddaa7d5056
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openidclaims.go
@@ -0,0 +1,62 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// OpenIDClaimsApplyConfiguration represents an declarative configuration of the OpenIDClaims type for use
+// with apply.
+type OpenIDClaimsApplyConfiguration struct {
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+ Name []string `json:"name,omitempty"`
+ Email []string `json:"email,omitempty"`
+ Groups []v1.OpenIDClaim `json:"groups,omitempty"`
+}
+
+// OpenIDClaimsApplyConfiguration constructs an declarative configuration of the OpenIDClaims type for use with
+// apply.
+func OpenIDClaims() *OpenIDClaimsApplyConfiguration {
+ return &OpenIDClaimsApplyConfiguration{}
+}
+
+// WithPreferredUsername adds the given value to the PreferredUsername field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PreferredUsername field.
+func (b *OpenIDClaimsApplyConfiguration) WithPreferredUsername(values ...string) *OpenIDClaimsApplyConfiguration {
+ for i := range values {
+ b.PreferredUsername = append(b.PreferredUsername, values[i])
+ }
+ return b
+}
+
+// WithName adds the given value to the Name field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Name field.
+func (b *OpenIDClaimsApplyConfiguration) WithName(values ...string) *OpenIDClaimsApplyConfiguration {
+ for i := range values {
+ b.Name = append(b.Name, values[i])
+ }
+ return b
+}
+
+// WithEmail adds the given value to the Email field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Email field.
+func (b *OpenIDClaimsApplyConfiguration) WithEmail(values ...string) *OpenIDClaimsApplyConfiguration {
+ for i := range values {
+ b.Email = append(b.Email, values[i])
+ }
+ return b
+}
+
+// WithGroups adds the given value to the Groups field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Groups field.
+func (b *OpenIDClaimsApplyConfiguration) WithGroups(values ...v1.OpenIDClaim) *OpenIDClaimsApplyConfiguration {
+ for i := range values {
+ b.Groups = append(b.Groups, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go
new file mode 100644
index 0000000000..6b143db8be
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openididentityprovider.go
@@ -0,0 +1,85 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OpenIDIdentityProviderApplyConfiguration represents an declarative configuration of the OpenIDIdentityProvider type for use
+// with apply.
+type OpenIDIdentityProviderApplyConfiguration struct {
+ ClientID *string `json:"clientID,omitempty"`
+ ClientSecret *SecretNameReferenceApplyConfiguration `json:"clientSecret,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+ ExtraScopes []string `json:"extraScopes,omitempty"`
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"`
+ Issuer *string `json:"issuer,omitempty"`
+ Claims *OpenIDClaimsApplyConfiguration `json:"claims,omitempty"`
+}
+
+// OpenIDIdentityProviderApplyConfiguration constructs an declarative configuration of the OpenIDIdentityProvider type for use with
+// apply.
+func OpenIDIdentityProvider() *OpenIDIdentityProviderApplyConfiguration {
+ return &OpenIDIdentityProviderApplyConfiguration{}
+}
+
+// WithClientID sets the ClientID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientID field is set to the value of the last call.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithClientID(value string) *OpenIDIdentityProviderApplyConfiguration {
+ b.ClientID = &value
+ return b
+}
+
+// WithClientSecret sets the ClientSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientSecret field is set to the value of the last call.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithClientSecret(value *SecretNameReferenceApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration {
+ b.ClientSecret = value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration {
+ b.CA = value
+ return b
+}
+
+// WithExtraScopes adds the given value to the ExtraScopes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExtraScopes field.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithExtraScopes(values ...string) *OpenIDIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.ExtraScopes = append(b.ExtraScopes, values[i])
+ }
+ return b
+}
+
+// WithExtraAuthorizeParameters puts the entries into the ExtraAuthorizeParameters field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the ExtraAuthorizeParameters field,
+// overwriting an existing map entries in ExtraAuthorizeParameters field with the same key.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithExtraAuthorizeParameters(entries map[string]string) *OpenIDIdentityProviderApplyConfiguration {
+ if b.ExtraAuthorizeParameters == nil && len(entries) > 0 {
+ b.ExtraAuthorizeParameters = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ExtraAuthorizeParameters[k] = v
+ }
+ return b
+}
+
+// WithIssuer sets the Issuer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Issuer field is set to the value of the last call.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithIssuer(value string) *OpenIDIdentityProviderApplyConfiguration {
+ b.Issuer = &value
+ return b
+}
+
+// WithClaims sets the Claims field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claims field is set to the value of the last call.
+func (b *OpenIDIdentityProviderApplyConfiguration) WithClaims(value *OpenIDClaimsApplyConfiguration) *OpenIDIdentityProviderApplyConfiguration {
+ b.Claims = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go
new file mode 100644
index 0000000000..2eed83e1cb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// OpenStackPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OpenStackPlatformLoadBalancer type for use
+// with apply.
+type OpenStackPlatformLoadBalancerApplyConfiguration struct {
+ Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// OpenStackPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OpenStackPlatformLoadBalancer type for use with
+// apply.
+func OpenStackPlatformLoadBalancer() *OpenStackPlatformLoadBalancerApplyConfiguration {
+ return &OpenStackPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *OpenStackPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OpenStackPlatformLoadBalancerApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go
new file mode 100644
index 0000000000..3a54152ae9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformspec.go
@@ -0,0 +1,51 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// OpenStackPlatformSpecApplyConfiguration represents an declarative configuration of the OpenStackPlatformSpec type for use
+// with apply.
+type OpenStackPlatformSpecApplyConfiguration struct {
+ APIServerInternalIPs []v1.IP `json:"apiServerInternalIPs,omitempty"`
+ IngressIPs []v1.IP `json:"ingressIPs,omitempty"`
+ MachineNetworks []v1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// OpenStackPlatformSpecApplyConfiguration constructs an declarative configuration of the OpenStackPlatformSpec type for use with
+// apply.
+func OpenStackPlatformSpec() *OpenStackPlatformSpecApplyConfiguration {
+ return &OpenStackPlatformSpecApplyConfiguration{}
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *OpenStackPlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...v1.IP) *OpenStackPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *OpenStackPlatformSpecApplyConfiguration) WithIngressIPs(values ...v1.IP) *OpenStackPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *OpenStackPlatformSpecApplyConfiguration) WithMachineNetworks(values ...v1.CIDR) *OpenStackPlatformSpecApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go
new file mode 100644
index 0000000000..8cfab14db9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go
@@ -0,0 +1,96 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// OpenStackPlatformStatusApplyConfiguration represents an declarative configuration of the OpenStackPlatformStatus type for use
+// with apply.
+type OpenStackPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"`
+ CloudName *string `json:"cloudName,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+ IngressIPs []string `json:"ingressIPs,omitempty"`
+ NodeDNSIP *string `json:"nodeDNSIP,omitempty"`
+ LoadBalancer *OpenStackPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+ MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// OpenStackPlatformStatusApplyConfiguration constructs an declarative configuration of the OpenStackPlatformStatus type for use with
+// apply.
+func OpenStackPlatformStatus() *OpenStackPlatformStatusApplyConfiguration {
+ return &OpenStackPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *OpenStackPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *OpenStackPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithCloudName sets the CloudName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CloudName field is set to the value of the last call.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithCloudName(value string) *OpenStackPlatformStatusApplyConfiguration {
+ b.CloudName = &value
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithIngressIP(value string) *OpenStackPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *OpenStackPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeDNSIP field is set to the value of the last call.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *OpenStackPlatformStatusApplyConfiguration {
+ b.NodeDNSIP = &value
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithLoadBalancer(value *OpenStackPlatformLoadBalancerApplyConfiguration) *OpenStackPlatformStatusApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *OpenStackPlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *OpenStackPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go
new file mode 100644
index 0000000000..6d9fc37c88
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operandversion.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OperandVersionApplyConfiguration represents an declarative configuration of the OperandVersion type for use
+// with apply.
+type OperandVersionApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Version *string `json:"version,omitempty"`
+}
+
+// OperandVersionApplyConfiguration constructs an declarative configuration of the OperandVersion type for use with
+// apply.
+func OperandVersion() *OperandVersionApplyConfiguration {
+ return &OperandVersionApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *OperandVersionApplyConfiguration) WithName(value string) *OperandVersionApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *OperandVersionApplyConfiguration) WithVersion(value string) *OperandVersionApplyConfiguration {
+ b.Version = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go
new file mode 100644
index 0000000000..57f017a9dc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// OperatorHubApplyConfiguration represents an declarative configuration of the OperatorHub type for use
+// with apply.
+type OperatorHubApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *OperatorHubSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *OperatorHubStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// OperatorHub constructs an declarative configuration of the OperatorHub type for use with
+// apply.
+func OperatorHub(name string) *OperatorHubApplyConfiguration {
+ b := &OperatorHubApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("OperatorHub")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractOperatorHub extracts the applied configuration owned by fieldManager from
+// operatorHub. If no managedFields are found in operatorHub for fieldManager, a
+// OperatorHubApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// operatorHub must be a unmodified OperatorHub API object that was retrieved from the Kubernetes API.
+// ExtractOperatorHub provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractOperatorHub(operatorHub *apiconfigv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) {
+ return extractOperatorHub(operatorHub, fieldManager, "")
+}
+
+// ExtractOperatorHubStatus is the same as ExtractOperatorHub except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractOperatorHubStatus(operatorHub *apiconfigv1.OperatorHub, fieldManager string) (*OperatorHubApplyConfiguration, error) {
+ return extractOperatorHub(operatorHub, fieldManager, "status")
+}
+
+func extractOperatorHub(operatorHub *apiconfigv1.OperatorHub, fieldManager string, subresource string) (*OperatorHubApplyConfiguration, error) {
+ b := &OperatorHubApplyConfiguration{}
+ err := managedfields.ExtractInto(operatorHub, internal.Parser().Type("com.github.openshift.api.config.v1.OperatorHub"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(operatorHub.Name)
+
+ b.WithKind("OperatorHub")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithKind(value string) *OperatorHubApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithAPIVersion(value string) *OperatorHubApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithName(value string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithGenerateName(value string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithNamespace(value string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithUID(value types.UID) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithResourceVersion(value string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithGeneration(value int64) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *OperatorHubApplyConfiguration) WithLabels(entries map[string]string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *OperatorHubApplyConfiguration) WithAnnotations(entries map[string]string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *OperatorHubApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *OperatorHubApplyConfiguration) WithFinalizers(values ...string) *OperatorHubApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *OperatorHubApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithSpec(value *OperatorHubSpecApplyConfiguration) *OperatorHubApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *OperatorHubApplyConfiguration) WithStatus(value *OperatorHubStatusApplyConfiguration) *OperatorHubApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go
new file mode 100644
index 0000000000..831b0769ec
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubspec.go
@@ -0,0 +1,37 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OperatorHubSpecApplyConfiguration represents an declarative configuration of the OperatorHubSpec type for use
+// with apply.
+type OperatorHubSpecApplyConfiguration struct {
+ DisableAllDefaultSources *bool `json:"disableAllDefaultSources,omitempty"`
+ Sources []HubSourceApplyConfiguration `json:"sources,omitempty"`
+}
+
+// OperatorHubSpecApplyConfiguration constructs an declarative configuration of the OperatorHubSpec type for use with
+// apply.
+func OperatorHubSpec() *OperatorHubSpecApplyConfiguration {
+ return &OperatorHubSpecApplyConfiguration{}
+}
+
+// WithDisableAllDefaultSources sets the DisableAllDefaultSources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DisableAllDefaultSources field is set to the value of the last call.
+func (b *OperatorHubSpecApplyConfiguration) WithDisableAllDefaultSources(value bool) *OperatorHubSpecApplyConfiguration {
+ b.DisableAllDefaultSources = &value
+ return b
+}
+
+// WithSources adds the given value to the Sources field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Sources field.
+func (b *OperatorHubSpecApplyConfiguration) WithSources(values ...*HubSourceApplyConfiguration) *OperatorHubSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSources")
+ }
+ b.Sources = append(b.Sources, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go
new file mode 100644
index 0000000000..86c134ff58
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhubstatus.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OperatorHubStatusApplyConfiguration represents an declarative configuration of the OperatorHubStatus type for use
+// with apply.
+type OperatorHubStatusApplyConfiguration struct {
+ Sources []HubSourceStatusApplyConfiguration `json:"sources,omitempty"`
+}
+
+// OperatorHubStatusApplyConfiguration constructs an declarative configuration of the OperatorHubStatus type for use with
+// apply.
+func OperatorHubStatus() *OperatorHubStatusApplyConfiguration {
+ return &OperatorHubStatusApplyConfiguration{}
+}
+
+// WithSources adds the given value to the Sources field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Sources field.
+func (b *OperatorHubStatusApplyConfiguration) WithSources(values ...*HubSourceStatusApplyConfiguration) *OperatorHubStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithSources")
+ }
+ b.Sources = append(b.Sources, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go
new file mode 100644
index 0000000000..73c2a03a5d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// OvirtPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OvirtPlatformLoadBalancer type for use
+// with apply.
+type OvirtPlatformLoadBalancerApplyConfiguration struct {
+ Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// OvirtPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OvirtPlatformLoadBalancer type for use with
+// apply.
+func OvirtPlatformLoadBalancer() *OvirtPlatformLoadBalancerApplyConfiguration {
+ return &OvirtPlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *OvirtPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OvirtPlatformLoadBalancerApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go
new file mode 100644
index 0000000000..21bb6c8426
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go
@@ -0,0 +1,72 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// OvirtPlatformStatusApplyConfiguration represents an declarative configuration of the OvirtPlatformStatus type for use
+// with apply.
+type OvirtPlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+ IngressIPs []string `json:"ingressIPs,omitempty"`
+ NodeDNSIP *string `json:"nodeDNSIP,omitempty"`
+ LoadBalancer *OvirtPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+}
+
+// OvirtPlatformStatusApplyConfiguration constructs an declarative configuration of the OvirtPlatformStatus type for use with
+// apply.
+func OvirtPlatformStatus() *OvirtPlatformStatusApplyConfiguration {
+ return &OvirtPlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *OvirtPlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *OvirtPlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *OvirtPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithIngressIP(value string) *OvirtPlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *OvirtPlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *OvirtPlatformStatusApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeDNSIP field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *OvirtPlatformStatusApplyConfiguration {
+ b.NodeDNSIP = &value
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *OvirtPlatformStatusApplyConfiguration) WithLoadBalancer(value *OvirtPlatformLoadBalancerApplyConfiguration) *OvirtPlatformStatusApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go
new file mode 100644
index 0000000000..b5d001691c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go
@@ -0,0 +1,153 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use
+// with apply.
+type PlatformSpecApplyConfiguration struct {
+ Type *v1.PlatformType `json:"type,omitempty"`
+ AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"`
+ Azure *v1.AzurePlatformSpec `json:"azure,omitempty"`
+ GCP *v1.GCPPlatformSpec `json:"gcp,omitempty"`
+ BareMetal *BareMetalPlatformSpecApplyConfiguration `json:"baremetal,omitempty"`
+ OpenStack *OpenStackPlatformSpecApplyConfiguration `json:"openstack,omitempty"`
+ Ovirt *v1.OvirtPlatformSpec `json:"ovirt,omitempty"`
+ VSphere *VSpherePlatformSpecApplyConfiguration `json:"vsphere,omitempty"`
+ IBMCloud *v1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+ Kubevirt *v1.KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+ EquinixMetal *v1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
+ PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"`
+ AlibabaCloud *v1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"`
+ Nutanix *NutanixPlatformSpecApplyConfiguration `json:"nutanix,omitempty"`
+ External *ExternalPlatformSpecApplyConfiguration `json:"external,omitempty"`
+}
+
+// PlatformSpecApplyConfiguration constructs an declarative configuration of the PlatformSpec type for use with
+// apply.
+func PlatformSpec() *PlatformSpecApplyConfiguration {
+ return &PlatformSpecApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *PlatformSpecApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithAWS sets the AWS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AWS field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithAWS(value *AWSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.AWS = value
+ return b
+}
+
+// WithAzure sets the Azure field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Azure field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithAzure(value v1.AzurePlatformSpec) *PlatformSpecApplyConfiguration {
+ b.Azure = &value
+ return b
+}
+
+// WithGCP sets the GCP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GCP field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithGCP(value v1.GCPPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.GCP = &value
+ return b
+}
+
+// WithBareMetal sets the BareMetal field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BareMetal field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithBareMetal(value *BareMetalPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.BareMetal = value
+ return b
+}
+
+// WithOpenStack sets the OpenStack field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OpenStack field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithOpenStack(value *OpenStackPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.OpenStack = value
+ return b
+}
+
+// WithOvirt sets the Ovirt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Ovirt field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithOvirt(value v1.OvirtPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.Ovirt = &value
+ return b
+}
+
+// WithVSphere sets the VSphere field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VSphere field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithVSphere(value *VSpherePlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.VSphere = value
+ return b
+}
+
+// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IBMCloud field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithIBMCloud(value v1.IBMCloudPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.IBMCloud = &value
+ return b
+}
+
+// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kubevirt field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithKubevirt(value v1.KubevirtPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.Kubevirt = &value
+ return b
+}
+
+// WithEquinixMetal sets the EquinixMetal field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the EquinixMetal field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithEquinixMetal(value v1.EquinixMetalPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.EquinixMetal = &value
+ return b
+}
+
+// WithPowerVS sets the PowerVS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PowerVS field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithPowerVS(value *PowerVSPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.PowerVS = value
+ return b
+}
+
+// WithAlibabaCloud sets the AlibabaCloud field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AlibabaCloud field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithAlibabaCloud(value v1.AlibabaCloudPlatformSpec) *PlatformSpecApplyConfiguration {
+ b.AlibabaCloud = &value
+ return b
+}
+
+// WithNutanix sets the Nutanix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Nutanix field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithNutanix(value *NutanixPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.Nutanix = value
+ return b
+}
+
+// WithExternal sets the External field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the External field is set to the value of the last call.
+func (b *PlatformSpecApplyConfiguration) WithExternal(value *ExternalPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration {
+ b.External = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go
new file mode 100644
index 0000000000..b6afa04a6c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go
@@ -0,0 +1,153 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// PlatformStatusApplyConfiguration represents an declarative configuration of the PlatformStatus type for use
+// with apply.
+type PlatformStatusApplyConfiguration struct {
+ Type *v1.PlatformType `json:"type,omitempty"`
+ AWS *AWSPlatformStatusApplyConfiguration `json:"aws,omitempty"`
+ Azure *AzurePlatformStatusApplyConfiguration `json:"azure,omitempty"`
+ GCP *GCPPlatformStatusApplyConfiguration `json:"gcp,omitempty"`
+ BareMetal *BareMetalPlatformStatusApplyConfiguration `json:"baremetal,omitempty"`
+ OpenStack *OpenStackPlatformStatusApplyConfiguration `json:"openstack,omitempty"`
+ Ovirt *OvirtPlatformStatusApplyConfiguration `json:"ovirt,omitempty"`
+ VSphere *VSpherePlatformStatusApplyConfiguration `json:"vsphere,omitempty"`
+ IBMCloud *IBMCloudPlatformStatusApplyConfiguration `json:"ibmcloud,omitempty"`
+ Kubevirt *KubevirtPlatformStatusApplyConfiguration `json:"kubevirt,omitempty"`
+ EquinixMetal *EquinixMetalPlatformStatusApplyConfiguration `json:"equinixMetal,omitempty"`
+ PowerVS *PowerVSPlatformStatusApplyConfiguration `json:"powervs,omitempty"`
+ AlibabaCloud *AlibabaCloudPlatformStatusApplyConfiguration `json:"alibabaCloud,omitempty"`
+ Nutanix *NutanixPlatformStatusApplyConfiguration `json:"nutanix,omitempty"`
+ External *ExternalPlatformStatusApplyConfiguration `json:"external,omitempty"`
+}
+
+// PlatformStatusApplyConfiguration constructs an declarative configuration of the PlatformStatus type for use with
+// apply.
+func PlatformStatus() *PlatformStatusApplyConfiguration {
+ return &PlatformStatusApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithType(value v1.PlatformType) *PlatformStatusApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithAWS sets the AWS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AWS field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithAWS(value *AWSPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.AWS = value
+ return b
+}
+
+// WithAzure sets the Azure field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Azure field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithAzure(value *AzurePlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.Azure = value
+ return b
+}
+
+// WithGCP sets the GCP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GCP field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithGCP(value *GCPPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.GCP = value
+ return b
+}
+
+// WithBareMetal sets the BareMetal field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BareMetal field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithBareMetal(value *BareMetalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.BareMetal = value
+ return b
+}
+
+// WithOpenStack sets the OpenStack field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OpenStack field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithOpenStack(value *OpenStackPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.OpenStack = value
+ return b
+}
+
+// WithOvirt sets the Ovirt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Ovirt field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithOvirt(value *OvirtPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.Ovirt = value
+ return b
+}
+
+// WithVSphere sets the VSphere field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the VSphere field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithVSphere(value *VSpherePlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.VSphere = value
+ return b
+}
+
+// WithIBMCloud sets the IBMCloud field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IBMCloud field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithIBMCloud(value *IBMCloudPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.IBMCloud = value
+ return b
+}
+
+// WithKubevirt sets the Kubevirt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kubevirt field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithKubevirt(value *KubevirtPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.Kubevirt = value
+ return b
+}
+
+// WithEquinixMetal sets the EquinixMetal field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the EquinixMetal field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithEquinixMetal(value *EquinixMetalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.EquinixMetal = value
+ return b
+}
+
+// WithPowerVS sets the PowerVS field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PowerVS field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithPowerVS(value *PowerVSPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.PowerVS = value
+ return b
+}
+
+// WithAlibabaCloud sets the AlibabaCloud field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AlibabaCloud field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithAlibabaCloud(value *AlibabaCloudPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.AlibabaCloud = value
+ return b
+}
+
+// WithNutanix sets the Nutanix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Nutanix field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithNutanix(value *NutanixPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.Nutanix = value
+ return b
+}
+
+// WithExternal sets the External field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the External field is set to the value of the last call.
+func (b *PlatformStatusApplyConfiguration) WithExternal(value *ExternalPlatformStatusApplyConfiguration) *PlatformStatusApplyConfiguration {
+ b.External = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go
new file mode 100644
index 0000000000..c371a6a262
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformspec.go
@@ -0,0 +1,28 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PowerVSPlatformSpecApplyConfiguration represents an declarative configuration of the PowerVSPlatformSpec type for use
+// with apply.
+type PowerVSPlatformSpecApplyConfiguration struct {
+ ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+}
+
+// PowerVSPlatformSpecApplyConfiguration constructs an declarative configuration of the PowerVSPlatformSpec type for use with
+// apply.
+func PowerVSPlatformSpec() *PowerVSPlatformSpecApplyConfiguration {
+ return &PowerVSPlatformSpecApplyConfiguration{}
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *PowerVSPlatformSpecApplyConfiguration) WithServiceEndpoints(values ...*PowerVSServiceEndpointApplyConfiguration) *PowerVSPlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithServiceEndpoints")
+ }
+ b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go
new file mode 100644
index 0000000000..c1660d005a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go
@@ -0,0 +1,73 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PowerVSPlatformStatusApplyConfiguration represents an declarative configuration of the PowerVSPlatformStatus type for use
+// with apply.
+type PowerVSPlatformStatusApplyConfiguration struct {
+ Region *string `json:"region,omitempty"`
+ Zone *string `json:"zone,omitempty"`
+ ResourceGroup *string `json:"resourceGroup,omitempty"`
+ ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"`
+ CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"`
+ DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"`
+}
+
+// PowerVSPlatformStatusApplyConfiguration constructs an declarative configuration of the PowerVSPlatformStatus type for use with
+// apply.
+func PowerVSPlatformStatus() *PowerVSPlatformStatusApplyConfiguration {
+ return &PowerVSPlatformStatusApplyConfiguration{}
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithRegion(value string) *PowerVSPlatformStatusApplyConfiguration {
+ b.Region = &value
+ return b
+}
+
+// WithZone sets the Zone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Zone field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithZone(value string) *PowerVSPlatformStatusApplyConfiguration {
+ b.Zone = &value
+ return b
+}
+
+// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceGroup field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithResourceGroup(value string) *PowerVSPlatformStatusApplyConfiguration {
+ b.ResourceGroup = &value
+ return b
+}
+
+// WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServiceEndpoints field.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithServiceEndpoints(values ...*PowerVSServiceEndpointApplyConfiguration) *PowerVSPlatformStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithServiceEndpoints")
+ }
+ b.ServiceEndpoints = append(b.ServiceEndpoints, *values[i])
+ }
+ return b
+}
+
+// WithCISInstanceCRN sets the CISInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CISInstanceCRN field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithCISInstanceCRN(value string) *PowerVSPlatformStatusApplyConfiguration {
+ b.CISInstanceCRN = &value
+ return b
+}
+
+// WithDNSInstanceCRN sets the DNSInstanceCRN field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DNSInstanceCRN field is set to the value of the last call.
+func (b *PowerVSPlatformStatusApplyConfiguration) WithDNSInstanceCRN(value string) *PowerVSPlatformStatusApplyConfiguration {
+ b.DNSInstanceCRN = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go
new file mode 100644
index 0000000000..ef262d38cd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsserviceendpoint.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PowerVSServiceEndpointApplyConfiguration represents an declarative configuration of the PowerVSServiceEndpoint type for use
+// with apply.
+type PowerVSServiceEndpointApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ URL *string `json:"url,omitempty"`
+}
+
+// PowerVSServiceEndpointApplyConfiguration constructs an declarative configuration of the PowerVSServiceEndpoint type for use with
+// apply.
+func PowerVSServiceEndpoint() *PowerVSServiceEndpointApplyConfiguration {
+ return &PowerVSServiceEndpointApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *PowerVSServiceEndpointApplyConfiguration) WithName(value string) *PowerVSServiceEndpointApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *PowerVSServiceEndpointApplyConfiguration) WithURL(value string) *PowerVSServiceEndpointApplyConfiguration {
+ b.URL = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go
new file mode 100644
index 0000000000..fedc364e3f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PrefixedClaimMappingApplyConfiguration represents an declarative configuration of the PrefixedClaimMapping type for use
+// with apply.
+type PrefixedClaimMappingApplyConfiguration struct {
+ TokenClaimMappingApplyConfiguration `json:",inline"`
+ Prefix *string `json:"prefix,omitempty"`
+}
+
+// PrefixedClaimMappingApplyConfiguration constructs an declarative configuration of the PrefixedClaimMapping type for use with
+// apply.
+func PrefixedClaimMapping() *PrefixedClaimMappingApplyConfiguration {
+ return &PrefixedClaimMappingApplyConfiguration{}
+}
+
+// WithClaim sets the Claim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claim field is set to the value of the last call.
+func (b *PrefixedClaimMappingApplyConfiguration) WithClaim(value string) *PrefixedClaimMappingApplyConfiguration {
+ b.Claim = &value
+ return b
+}
+
+// WithPrefix sets the Prefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Prefix field is set to the value of the last call.
+func (b *PrefixedClaimMappingApplyConfiguration) WithPrefix(value string) *PrefixedClaimMappingApplyConfiguration {
+ b.Prefix = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go
new file mode 100644
index 0000000000..15723fcc6b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/profilecustomizations.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ProfileCustomizationsApplyConfiguration represents an declarative configuration of the ProfileCustomizations type for use
+// with apply.
+type ProfileCustomizationsApplyConfiguration struct {
+ DynamicResourceAllocation *v1.DRAEnablement `json:"dynamicResourceAllocation,omitempty"`
+}
+
+// ProfileCustomizationsApplyConfiguration constructs an declarative configuration of the ProfileCustomizations type for use with
+// apply.
+func ProfileCustomizations() *ProfileCustomizationsApplyConfiguration {
+ return &ProfileCustomizationsApplyConfiguration{}
+}
+
+// WithDynamicResourceAllocation sets the DynamicResourceAllocation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DynamicResourceAllocation field is set to the value of the last call.
+func (b *ProfileCustomizationsApplyConfiguration) WithDynamicResourceAllocation(value v1.DRAEnablement) *ProfileCustomizationsApplyConfiguration {
+ b.DynamicResourceAllocation = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go
new file mode 100644
index 0000000000..9e074da6c4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ProjectApplyConfiguration represents an declarative configuration of the Project type for use
+// with apply.
+type ProjectApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ProjectSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.ProjectStatus `json:"status,omitempty"`
+}
+
+// Project constructs an declarative configuration of the Project type for use with
+// apply.
+func Project(name string) *ProjectApplyConfiguration {
+ b := &ProjectApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Project")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractProject extracts the applied configuration owned by fieldManager from
+// project. If no managedFields are found in project for fieldManager, a
+// ProjectApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// project must be a unmodified Project API object that was retrieved from the Kubernetes API.
+// ExtractProject provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractProject(project *apiconfigv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) {
+ return extractProject(project, fieldManager, "")
+}
+
+// ExtractProjectStatus is the same as ExtractProject except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractProjectStatus(project *apiconfigv1.Project, fieldManager string) (*ProjectApplyConfiguration, error) {
+ return extractProject(project, fieldManager, "status")
+}
+
+func extractProject(project *apiconfigv1.Project, fieldManager string, subresource string) (*ProjectApplyConfiguration, error) {
+ b := &ProjectApplyConfiguration{}
+ err := managedfields.ExtractInto(project, internal.Parser().Type("com.github.openshift.api.config.v1.Project"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(project.Name)
+
+ b.WithKind("Project")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithKind(value string) *ProjectApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithAPIVersion(value string) *ProjectApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithName(value string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithGenerateName(value string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithNamespace(value string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithUID(value types.UID) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithResourceVersion(value string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithGeneration(value int64) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ProjectApplyConfiguration) WithLabels(entries map[string]string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ProjectApplyConfiguration) WithAnnotations(entries map[string]string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ProjectApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ProjectApplyConfiguration) WithFinalizers(values ...string) *ProjectApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ProjectApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithSpec(value *ProjectSpecApplyConfiguration) *ProjectApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ProjectApplyConfiguration) WithStatus(value apiconfigv1.ProjectStatus) *ProjectApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go
new file mode 100644
index 0000000000..0e0a2334ee
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/projectspec.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ProjectSpecApplyConfiguration represents an declarative configuration of the ProjectSpec type for use
+// with apply.
+type ProjectSpecApplyConfiguration struct {
+ ProjectRequestMessage *string `json:"projectRequestMessage,omitempty"`
+ ProjectRequestTemplate *TemplateReferenceApplyConfiguration `json:"projectRequestTemplate,omitempty"`
+}
+
+// ProjectSpecApplyConfiguration constructs an declarative configuration of the ProjectSpec type for use with
+// apply.
+func ProjectSpec() *ProjectSpecApplyConfiguration {
+ return &ProjectSpecApplyConfiguration{}
+}
+
+// WithProjectRequestMessage sets the ProjectRequestMessage field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProjectRequestMessage field is set to the value of the last call.
+func (b *ProjectSpecApplyConfiguration) WithProjectRequestMessage(value string) *ProjectSpecApplyConfiguration {
+ b.ProjectRequestMessage = &value
+ return b
+}
+
+// WithProjectRequestTemplate sets the ProjectRequestTemplate field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProjectRequestTemplate field is set to the value of the last call.
+func (b *ProjectSpecApplyConfiguration) WithProjectRequestTemplate(value *TemplateReferenceApplyConfiguration) *ProjectSpecApplyConfiguration {
+ b.ProjectRequestTemplate = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go
new file mode 100644
index 0000000000..282559a40d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/promqlclustercondition.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PromQLClusterConditionApplyConfiguration represents an declarative configuration of the PromQLClusterCondition type for use
+// with apply.
+type PromQLClusterConditionApplyConfiguration struct {
+ PromQL *string `json:"promql,omitempty"`
+}
+
+// PromQLClusterConditionApplyConfiguration constructs an declarative configuration of the PromQLClusterCondition type for use with
+// apply.
+func PromQLClusterCondition() *PromQLClusterConditionApplyConfiguration {
+ return &PromQLClusterConditionApplyConfiguration{}
+}
+
+// WithPromQL sets the PromQL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PromQL field is set to the value of the last call.
+func (b *PromQLClusterConditionApplyConfiguration) WithPromQL(value string) *PromQLClusterConditionApplyConfiguration {
+ b.PromQL = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go
new file mode 100644
index 0000000000..79360af8e2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ProxyApplyConfiguration represents an declarative configuration of the Proxy type for use
+// with apply.
+type ProxyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ProxySpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ProxyStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// Proxy constructs an declarative configuration of the Proxy type for use with
+// apply.
+func Proxy(name string) *ProxyApplyConfiguration {
+ b := &ProxyApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Proxy")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractProxy extracts the applied configuration owned by fieldManager from
+// proxy. If no managedFields are found in proxy for fieldManager, a
+// ProxyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// proxy must be a unmodified Proxy API object that was retrieved from the Kubernetes API.
+// ExtractProxy provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractProxy(proxy *apiconfigv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) {
+ return extractProxy(proxy, fieldManager, "")
+}
+
+// ExtractProxyStatus is the same as ExtractProxy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractProxyStatus(proxy *apiconfigv1.Proxy, fieldManager string) (*ProxyApplyConfiguration, error) {
+ return extractProxy(proxy, fieldManager, "status")
+}
+
+func extractProxy(proxy *apiconfigv1.Proxy, fieldManager string, subresource string) (*ProxyApplyConfiguration, error) {
+ b := &ProxyApplyConfiguration{}
+ err := managedfields.ExtractInto(proxy, internal.Parser().Type("com.github.openshift.api.config.v1.Proxy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(proxy.Name)
+
+ b.WithKind("Proxy")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithKind(value string) *ProxyApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithAPIVersion(value string) *ProxyApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithName(value string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithGenerateName(value string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithNamespace(value string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithUID(value types.UID) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithResourceVersion(value string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithGeneration(value int64) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ProxyApplyConfiguration) WithLabels(entries map[string]string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ProxyApplyConfiguration) WithAnnotations(entries map[string]string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ProxyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ProxyApplyConfiguration) WithFinalizers(values ...string) *ProxyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ProxyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithSpec(value *ProxySpecApplyConfiguration) *ProxyApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ProxyApplyConfiguration) WithStatus(value *ProxyStatusApplyConfiguration) *ProxyApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go
new file mode 100644
index 0000000000..0eecac56c1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxyspec.go
@@ -0,0 +1,61 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ProxySpecApplyConfiguration represents an declarative configuration of the ProxySpec type for use
+// with apply.
+type ProxySpecApplyConfiguration struct {
+ HTTPProxy *string `json:"httpProxy,omitempty"`
+ HTTPSProxy *string `json:"httpsProxy,omitempty"`
+ NoProxy *string `json:"noProxy,omitempty"`
+ ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"`
+ TrustedCA *ConfigMapNameReferenceApplyConfiguration `json:"trustedCA,omitempty"`
+}
+
+// ProxySpecApplyConfiguration constructs an declarative configuration of the ProxySpec type for use with
+// apply.
+func ProxySpec() *ProxySpecApplyConfiguration {
+ return &ProxySpecApplyConfiguration{}
+}
+
+// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTTPProxy field is set to the value of the last call.
+func (b *ProxySpecApplyConfiguration) WithHTTPProxy(value string) *ProxySpecApplyConfiguration {
+ b.HTTPProxy = &value
+ return b
+}
+
+// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTTPSProxy field is set to the value of the last call.
+func (b *ProxySpecApplyConfiguration) WithHTTPSProxy(value string) *ProxySpecApplyConfiguration {
+ b.HTTPSProxy = &value
+ return b
+}
+
+// WithNoProxy sets the NoProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NoProxy field is set to the value of the last call.
+func (b *ProxySpecApplyConfiguration) WithNoProxy(value string) *ProxySpecApplyConfiguration {
+ b.NoProxy = &value
+ return b
+}
+
+// WithReadinessEndpoints adds the given value to the ReadinessEndpoints field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ReadinessEndpoints field.
+func (b *ProxySpecApplyConfiguration) WithReadinessEndpoints(values ...string) *ProxySpecApplyConfiguration {
+ for i := range values {
+ b.ReadinessEndpoints = append(b.ReadinessEndpoints, values[i])
+ }
+ return b
+}
+
+// WithTrustedCA sets the TrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TrustedCA field is set to the value of the last call.
+func (b *ProxySpecApplyConfiguration) WithTrustedCA(value *ConfigMapNameReferenceApplyConfiguration) *ProxySpecApplyConfiguration {
+ b.TrustedCA = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go
new file mode 100644
index 0000000000..069d479fbd
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxystatus.go
@@ -0,0 +1,41 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ProxyStatusApplyConfiguration represents an declarative configuration of the ProxyStatus type for use
+// with apply.
+type ProxyStatusApplyConfiguration struct {
+ HTTPProxy *string `json:"httpProxy,omitempty"`
+ HTTPSProxy *string `json:"httpsProxy,omitempty"`
+ NoProxy *string `json:"noProxy,omitempty"`
+}
+
+// ProxyStatusApplyConfiguration constructs an declarative configuration of the ProxyStatus type for use with
+// apply.
+func ProxyStatus() *ProxyStatusApplyConfiguration {
+ return &ProxyStatusApplyConfiguration{}
+}
+
+// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTTPProxy field is set to the value of the last call.
+func (b *ProxyStatusApplyConfiguration) WithHTTPProxy(value string) *ProxyStatusApplyConfiguration {
+ b.HTTPProxy = &value
+ return b
+}
+
+// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HTTPSProxy field is set to the value of the last call.
+func (b *ProxyStatusApplyConfiguration) WithHTTPSProxy(value string) *ProxyStatusApplyConfiguration {
+ b.HTTPSProxy = &value
+ return b
+}
+
+// WithNoProxy sets the NoProxy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NoProxy field is set to the value of the last call.
+func (b *ProxyStatusApplyConfiguration) WithNoProxy(value string) *ProxyStatusApplyConfiguration {
+ b.NoProxy = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go
new file mode 100644
index 0000000000..2f48be9320
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrylocation.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RegistryLocationApplyConfiguration represents an declarative configuration of the RegistryLocation type for use
+// with apply.
+type RegistryLocationApplyConfiguration struct {
+ DomainName *string `json:"domainName,omitempty"`
+ Insecure *bool `json:"insecure,omitempty"`
+}
+
+// RegistryLocationApplyConfiguration constructs an declarative configuration of the RegistryLocation type for use with
+// apply.
+func RegistryLocation() *RegistryLocationApplyConfiguration {
+ return &RegistryLocationApplyConfiguration{}
+}
+
+// WithDomainName sets the DomainName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DomainName field is set to the value of the last call.
+func (b *RegistryLocationApplyConfiguration) WithDomainName(value string) *RegistryLocationApplyConfiguration {
+ b.DomainName = &value
+ return b
+}
+
+// WithInsecure sets the Insecure field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Insecure field is set to the value of the last call.
+func (b *RegistryLocationApplyConfiguration) WithInsecure(value bool) *RegistryLocationApplyConfiguration {
+ b.Insecure = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go
new file mode 100644
index 0000000000..02ff90c577
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/registrysources.go
@@ -0,0 +1,58 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RegistrySourcesApplyConfiguration represents an declarative configuration of the RegistrySources type for use
+// with apply.
+type RegistrySourcesApplyConfiguration struct {
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"`
+}
+
+// RegistrySourcesApplyConfiguration constructs an declarative configuration of the RegistrySources type for use with
+// apply.
+func RegistrySources() *RegistrySourcesApplyConfiguration {
+ return &RegistrySourcesApplyConfiguration{}
+}
+
+// WithInsecureRegistries adds the given value to the InsecureRegistries field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the InsecureRegistries field.
+func (b *RegistrySourcesApplyConfiguration) WithInsecureRegistries(values ...string) *RegistrySourcesApplyConfiguration {
+ for i := range values {
+ b.InsecureRegistries = append(b.InsecureRegistries, values[i])
+ }
+ return b
+}
+
+// WithBlockedRegistries adds the given value to the BlockedRegistries field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the BlockedRegistries field.
+func (b *RegistrySourcesApplyConfiguration) WithBlockedRegistries(values ...string) *RegistrySourcesApplyConfiguration {
+ for i := range values {
+ b.BlockedRegistries = append(b.BlockedRegistries, values[i])
+ }
+ return b
+}
+
+// WithAllowedRegistries adds the given value to the AllowedRegistries field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AllowedRegistries field.
+func (b *RegistrySourcesApplyConfiguration) WithAllowedRegistries(values ...string) *RegistrySourcesApplyConfiguration {
+ for i := range values {
+ b.AllowedRegistries = append(b.AllowedRegistries, values[i])
+ }
+ return b
+}
+
+// WithContainerRuntimeSearchRegistries adds the given value to the ContainerRuntimeSearchRegistries field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ContainerRuntimeSearchRegistries field.
+func (b *RegistrySourcesApplyConfiguration) WithContainerRuntimeSearchRegistries(values ...string) *RegistrySourcesApplyConfiguration {
+ for i := range values {
+ b.ContainerRuntimeSearchRegistries = append(b.ContainerRuntimeSearchRegistries, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go
new file mode 100644
index 0000000000..18b3f76f84
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go
@@ -0,0 +1,56 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// ReleaseApplyConfiguration represents an declarative configuration of the Release type for use
+// with apply.
+type ReleaseApplyConfiguration struct {
+ Version *string `json:"version,omitempty"`
+ Image *string `json:"image,omitempty"`
+ URL *v1.URL `json:"url,omitempty"`
+ Channels []string `json:"channels,omitempty"`
+}
+
+// ReleaseApplyConfiguration constructs an declarative configuration of the Release type for use with
+// apply.
+func Release() *ReleaseApplyConfiguration {
+ return &ReleaseApplyConfiguration{}
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *ReleaseApplyConfiguration) WithVersion(value string) *ReleaseApplyConfiguration {
+ b.Version = &value
+ return b
+}
+
+// WithImage sets the Image field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Image field is set to the value of the last call.
+func (b *ReleaseApplyConfiguration) WithImage(value string) *ReleaseApplyConfiguration {
+ b.Image = &value
+ return b
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *ReleaseApplyConfiguration) WithURL(value v1.URL) *ReleaseApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithChannels adds the given value to the Channels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Channels field.
+func (b *ReleaseApplyConfiguration) WithChannels(values ...string) *ReleaseApplyConfiguration {
+ for i := range values {
+ b.Channels = append(b.Channels, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go
new file mode 100644
index 0000000000..2806aea92b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/repositorydigestmirrors.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// RepositoryDigestMirrorsApplyConfiguration represents an declarative configuration of the RepositoryDigestMirrors type for use
+// with apply.
+type RepositoryDigestMirrorsApplyConfiguration struct {
+ Source *string `json:"source,omitempty"`
+ AllowMirrorByTags *bool `json:"allowMirrorByTags,omitempty"`
+ Mirrors []v1.Mirror `json:"mirrors,omitempty"`
+}
+
+// RepositoryDigestMirrorsApplyConfiguration constructs an declarative configuration of the RepositoryDigestMirrors type for use with
+// apply.
+func RepositoryDigestMirrors() *RepositoryDigestMirrorsApplyConfiguration {
+ return &RepositoryDigestMirrorsApplyConfiguration{}
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *RepositoryDigestMirrorsApplyConfiguration) WithSource(value string) *RepositoryDigestMirrorsApplyConfiguration {
+ b.Source = &value
+ return b
+}
+
+// WithAllowMirrorByTags sets the AllowMirrorByTags field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllowMirrorByTags field is set to the value of the last call.
+func (b *RepositoryDigestMirrorsApplyConfiguration) WithAllowMirrorByTags(value bool) *RepositoryDigestMirrorsApplyConfiguration {
+ b.AllowMirrorByTags = &value
+ return b
+}
+
+// WithMirrors adds the given value to the Mirrors field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mirrors field.
+func (b *RepositoryDigestMirrorsApplyConfiguration) WithMirrors(values ...v1.Mirror) *RepositoryDigestMirrorsApplyConfiguration {
+ for i := range values {
+ b.Mirrors = append(b.Mirrors, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go
new file mode 100644
index 0000000000..fa787ce7c2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requestheaderidentityprovider.go
@@ -0,0 +1,96 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// RequestHeaderIdentityProviderApplyConfiguration represents an declarative configuration of the RequestHeaderIdentityProvider type for use
+// with apply.
+type RequestHeaderIdentityProviderApplyConfiguration struct {
+ LoginURL *string `json:"loginURL,omitempty"`
+ ChallengeURL *string `json:"challengeURL,omitempty"`
+ ClientCA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+ ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+ Headers []string `json:"headers,omitempty"`
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders,omitempty"`
+ NameHeaders []string `json:"nameHeaders,omitempty"`
+ EmailHeaders []string `json:"emailHeaders,omitempty"`
+}
+
+// RequestHeaderIdentityProviderApplyConfiguration constructs an declarative configuration of the RequestHeaderIdentityProvider type for use with
+// apply.
+func RequestHeaderIdentityProvider() *RequestHeaderIdentityProviderApplyConfiguration {
+ return &RequestHeaderIdentityProviderApplyConfiguration{}
+}
+
+// WithLoginURL sets the LoginURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoginURL field is set to the value of the last call.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithLoginURL(value string) *RequestHeaderIdentityProviderApplyConfiguration {
+ b.LoginURL = &value
+ return b
+}
+
+// WithChallengeURL sets the ChallengeURL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ChallengeURL field is set to the value of the last call.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithChallengeURL(value string) *RequestHeaderIdentityProviderApplyConfiguration {
+ b.ChallengeURL = &value
+ return b
+}
+
+// WithClientCA sets the ClientCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClientCA field is set to the value of the last call.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithClientCA(value *ConfigMapNameReferenceApplyConfiguration) *RequestHeaderIdentityProviderApplyConfiguration {
+ b.ClientCA = value
+ return b
+}
+
+// WithClientCommonNames adds the given value to the ClientCommonNames field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ClientCommonNames field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithClientCommonNames(values ...string) *RequestHeaderIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.ClientCommonNames = append(b.ClientCommonNames, values[i])
+ }
+ return b
+}
+
+// WithHeaders adds the given value to the Headers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Headers field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.Headers = append(b.Headers, values[i])
+ }
+ return b
+}
+
+// WithPreferredUsernameHeaders adds the given value to the PreferredUsernameHeaders field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PreferredUsernameHeaders field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithPreferredUsernameHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.PreferredUsernameHeaders = append(b.PreferredUsernameHeaders, values[i])
+ }
+ return b
+}
+
+// WithNameHeaders adds the given value to the NameHeaders field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NameHeaders field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithNameHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.NameHeaders = append(b.NameHeaders, values[i])
+ }
+ return b
+}
+
+// WithEmailHeaders adds the given value to the EmailHeaders field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the EmailHeaders field.
+func (b *RequestHeaderIdentityProviderApplyConfiguration) WithEmailHeaders(values ...string) *RequestHeaderIdentityProviderApplyConfiguration {
+ for i := range values {
+ b.EmailHeaders = append(b.EmailHeaders, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go
new file mode 100644
index 0000000000..5b3e741077
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/requiredhstspolicy.go
@@ -0,0 +1,66 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// RequiredHSTSPolicyApplyConfiguration represents an declarative configuration of the RequiredHSTSPolicy type for use
+// with apply.
+type RequiredHSTSPolicyApplyConfiguration struct {
+ NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ DomainPatterns []string `json:"domainPatterns,omitempty"`
+ MaxAge *MaxAgePolicyApplyConfiguration `json:"maxAge,omitempty"`
+ PreloadPolicy *apiconfigv1.PreloadPolicy `json:"preloadPolicy,omitempty"`
+ IncludeSubDomainsPolicy *apiconfigv1.IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"`
+}
+
+// RequiredHSTSPolicyApplyConfiguration constructs an declarative configuration of the RequiredHSTSPolicy type for use with
+// apply.
+func RequiredHSTSPolicy() *RequiredHSTSPolicyApplyConfiguration {
+ return &RequiredHSTSPolicyApplyConfiguration{}
+}
+
+// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NamespaceSelector field is set to the value of the last call.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration {
+ b.NamespaceSelector = value
+ return b
+}
+
+// WithDomainPatterns adds the given value to the DomainPatterns field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the DomainPatterns field.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithDomainPatterns(values ...string) *RequiredHSTSPolicyApplyConfiguration {
+ for i := range values {
+ b.DomainPatterns = append(b.DomainPatterns, values[i])
+ }
+ return b
+}
+
+// WithMaxAge sets the MaxAge field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxAge field is set to the value of the last call.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithMaxAge(value *MaxAgePolicyApplyConfiguration) *RequiredHSTSPolicyApplyConfiguration {
+ b.MaxAge = value
+ return b
+}
+
+// WithPreloadPolicy sets the PreloadPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PreloadPolicy field is set to the value of the last call.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithPreloadPolicy(value apiconfigv1.PreloadPolicy) *RequiredHSTSPolicyApplyConfiguration {
+ b.PreloadPolicy = &value
+ return b
+}
+
+// WithIncludeSubDomainsPolicy sets the IncludeSubDomainsPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IncludeSubDomainsPolicy field is set to the value of the last call.
+func (b *RequiredHSTSPolicyApplyConfiguration) WithIncludeSubDomainsPolicy(value apiconfigv1.IncludeSubDomainsPolicy) *RequiredHSTSPolicyApplyConfiguration {
+ b.IncludeSubDomainsPolicy = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go
new file mode 100644
index 0000000000..d9de60a42f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ apiconfigv1 "github.com/openshift/api/config/v1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// SchedulerApplyConfiguration represents an declarative configuration of the Scheduler type for use
+// with apply.
+type SchedulerApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *SchedulerSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *apiconfigv1.SchedulerStatus `json:"status,omitempty"`
+}
+
+// Scheduler constructs an declarative configuration of the Scheduler type for use with
+// apply.
+func Scheduler(name string) *SchedulerApplyConfiguration {
+ b := &SchedulerApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Scheduler")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b
+}
+
+// ExtractScheduler extracts the applied configuration owned by fieldManager from
+// scheduler. If no managedFields are found in scheduler for fieldManager, a
+// SchedulerApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// scheduler must be a unmodified Scheduler API object that was retrieved from the Kubernetes API.
+// ExtractScheduler provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractScheduler(scheduler *apiconfigv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) {
+ return extractScheduler(scheduler, fieldManager, "")
+}
+
+// ExtractSchedulerStatus is the same as ExtractScheduler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractSchedulerStatus(scheduler *apiconfigv1.Scheduler, fieldManager string) (*SchedulerApplyConfiguration, error) {
+ return extractScheduler(scheduler, fieldManager, "status")
+}
+
+func extractScheduler(scheduler *apiconfigv1.Scheduler, fieldManager string, subresource string) (*SchedulerApplyConfiguration, error) {
+ b := &SchedulerApplyConfiguration{}
+ err := managedfields.ExtractInto(scheduler, internal.Parser().Type("com.github.openshift.api.config.v1.Scheduler"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(scheduler.Name)
+
+ b.WithKind("Scheduler")
+ b.WithAPIVersion("config.openshift.io/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithKind(value string) *SchedulerApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithAPIVersion(value string) *SchedulerApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithName(value string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithGenerateName(value string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithNamespace(value string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithUID(value types.UID) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithResourceVersion(value string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithGeneration(value int64) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *SchedulerApplyConfiguration) WithLabels(entries map[string]string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *SchedulerApplyConfiguration) WithAnnotations(entries map[string]string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *SchedulerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *SchedulerApplyConfiguration) WithFinalizers(values ...string) *SchedulerApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *SchedulerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithSpec(value *SchedulerSpecApplyConfiguration) *SchedulerApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *SchedulerApplyConfiguration) WithStatus(value apiconfigv1.SchedulerStatus) *SchedulerApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go
new file mode 100644
index 0000000000..1df067067f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/schedulerspec.go
@@ -0,0 +1,63 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// SchedulerSpecApplyConfiguration represents an declarative configuration of the SchedulerSpec type for use
+// with apply.
+type SchedulerSpecApplyConfiguration struct {
+ Policy *ConfigMapNameReferenceApplyConfiguration `json:"policy,omitempty"`
+ Profile *configv1.SchedulerProfile `json:"profile,omitempty"`
+ ProfileCustomizations *ProfileCustomizationsApplyConfiguration `json:"profileCustomizations,omitempty"`
+ DefaultNodeSelector *string `json:"defaultNodeSelector,omitempty"`
+ MastersSchedulable *bool `json:"mastersSchedulable,omitempty"`
+}
+
+// SchedulerSpecApplyConfiguration constructs an declarative configuration of the SchedulerSpec type for use with
+// apply.
+func SchedulerSpec() *SchedulerSpecApplyConfiguration {
+ return &SchedulerSpecApplyConfiguration{}
+}
+
+// WithPolicy sets the Policy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Policy field is set to the value of the last call.
+func (b *SchedulerSpecApplyConfiguration) WithPolicy(value *ConfigMapNameReferenceApplyConfiguration) *SchedulerSpecApplyConfiguration {
+ b.Policy = value
+ return b
+}
+
+// WithProfile sets the Profile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Profile field is set to the value of the last call.
+func (b *SchedulerSpecApplyConfiguration) WithProfile(value configv1.SchedulerProfile) *SchedulerSpecApplyConfiguration {
+ b.Profile = &value
+ return b
+}
+
+// WithProfileCustomizations sets the ProfileCustomizations field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ProfileCustomizations field is set to the value of the last call.
+func (b *SchedulerSpecApplyConfiguration) WithProfileCustomizations(value *ProfileCustomizationsApplyConfiguration) *SchedulerSpecApplyConfiguration {
+ b.ProfileCustomizations = value
+ return b
+}
+
+// WithDefaultNodeSelector sets the DefaultNodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DefaultNodeSelector field is set to the value of the last call.
+func (b *SchedulerSpecApplyConfiguration) WithDefaultNodeSelector(value string) *SchedulerSpecApplyConfiguration {
+ b.DefaultNodeSelector = &value
+ return b
+}
+
+// WithMastersSchedulable sets the MastersSchedulable field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MastersSchedulable field is set to the value of the last call.
+func (b *SchedulerSpecApplyConfiguration) WithMastersSchedulable(value bool) *SchedulerSpecApplyConfiguration {
+ b.MastersSchedulable = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go
new file mode 100644
index 0000000000..9cd673082c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/secretnamereference.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// SecretNameReferenceApplyConfiguration represents an declarative configuration of the SecretNameReference type for use
+// with apply.
+type SecretNameReferenceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+}
+
+// SecretNameReferenceApplyConfiguration constructs an declarative configuration of the SecretNameReference type for use with
+// apply.
+func SecretNameReference() *SecretNameReferenceApplyConfiguration {
+ return &SecretNameReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *SecretNameReferenceApplyConfiguration) WithName(value string) *SecretNameReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go
new file mode 100644
index 0000000000..b0d95c6e74
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/signaturestore.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// SignatureStoreApplyConfiguration represents an declarative configuration of the SignatureStore type for use
+// with apply.
+type SignatureStoreApplyConfiguration struct {
+ URL *string `json:"url,omitempty"`
+ CA *ConfigMapNameReferenceApplyConfiguration `json:"ca,omitempty"`
+}
+
+// SignatureStoreApplyConfiguration constructs an declarative configuration of the SignatureStore type for use with
+// apply.
+func SignatureStore() *SignatureStoreApplyConfiguration {
+ return &SignatureStoreApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *SignatureStoreApplyConfiguration) WithURL(value string) *SignatureStoreApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithCA sets the CA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CA field is set to the value of the last call.
+func (b *SignatureStoreApplyConfiguration) WithCA(value *ConfigMapNameReferenceApplyConfiguration) *SignatureStoreApplyConfiguration {
+ b.CA = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go
new file mode 100644
index 0000000000..e1da30be39
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/templatereference.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TemplateReferenceApplyConfiguration represents an declarative configuration of the TemplateReference type for use
+// with apply.
+type TemplateReferenceApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+}
+
+// TemplateReferenceApplyConfiguration constructs an declarative configuration of the TemplateReference type for use with
+// apply.
+func TemplateReference() *TemplateReferenceApplyConfiguration {
+ return &TemplateReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *TemplateReferenceApplyConfiguration) WithName(value string) *TemplateReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go
new file mode 100644
index 0000000000..3e08cc9ade
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlsprofilespec.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// TLSProfileSpecApplyConfiguration represents an declarative configuration of the TLSProfileSpec type for use
+// with apply.
+type TLSProfileSpecApplyConfiguration struct {
+ Ciphers []string `json:"ciphers,omitempty"`
+ MinTLSVersion *v1.TLSProtocolVersion `json:"minTLSVersion,omitempty"`
+}
+
+// TLSProfileSpecApplyConfiguration constructs an declarative configuration of the TLSProfileSpec type for use with
+// apply.
+func TLSProfileSpec() *TLSProfileSpecApplyConfiguration {
+ return &TLSProfileSpecApplyConfiguration{}
+}
+
+// WithCiphers adds the given value to the Ciphers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Ciphers field.
+func (b *TLSProfileSpecApplyConfiguration) WithCiphers(values ...string) *TLSProfileSpecApplyConfiguration {
+ for i := range values {
+ b.Ciphers = append(b.Ciphers, values[i])
+ }
+ return b
+}
+
+// WithMinTLSVersion sets the MinTLSVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinTLSVersion field is set to the value of the last call.
+func (b *TLSProfileSpecApplyConfiguration) WithMinTLSVersion(value v1.TLSProtocolVersion) *TLSProfileSpecApplyConfiguration {
+ b.MinTLSVersion = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go
new file mode 100644
index 0000000000..fb32e8516f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tlssecurityprofile.go
@@ -0,0 +1,63 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// TLSSecurityProfileApplyConfiguration represents an declarative configuration of the TLSSecurityProfile type for use
+// with apply.
+type TLSSecurityProfileApplyConfiguration struct {
+ Type *v1.TLSProfileType `json:"type,omitempty"`
+ Old *v1.OldTLSProfile `json:"old,omitempty"`
+ Intermediate *v1.IntermediateTLSProfile `json:"intermediate,omitempty"`
+ Modern *v1.ModernTLSProfile `json:"modern,omitempty"`
+ Custom *CustomTLSProfileApplyConfiguration `json:"custom,omitempty"`
+}
+
+// TLSSecurityProfileApplyConfiguration constructs an declarative configuration of the TLSSecurityProfile type for use with
+// apply.
+func TLSSecurityProfile() *TLSSecurityProfileApplyConfiguration {
+ return &TLSSecurityProfileApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *TLSSecurityProfileApplyConfiguration) WithType(value v1.TLSProfileType) *TLSSecurityProfileApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithOld sets the Old field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Old field is set to the value of the last call.
+func (b *TLSSecurityProfileApplyConfiguration) WithOld(value v1.OldTLSProfile) *TLSSecurityProfileApplyConfiguration {
+ b.Old = &value
+ return b
+}
+
+// WithIntermediate sets the Intermediate field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Intermediate field is set to the value of the last call.
+func (b *TLSSecurityProfileApplyConfiguration) WithIntermediate(value v1.IntermediateTLSProfile) *TLSSecurityProfileApplyConfiguration {
+ b.Intermediate = &value
+ return b
+}
+
+// WithModern sets the Modern field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Modern field is set to the value of the last call.
+func (b *TLSSecurityProfileApplyConfiguration) WithModern(value v1.ModernTLSProfile) *TLSSecurityProfileApplyConfiguration {
+ b.Modern = &value
+ return b
+}
+
+// WithCustom sets the Custom field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Custom field is set to the value of the last call.
+func (b *TLSSecurityProfileApplyConfiguration) WithCustom(value *CustomTLSProfileApplyConfiguration) *TLSSecurityProfileApplyConfiguration {
+ b.Custom = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go
new file mode 100644
index 0000000000..91c29b61c4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmapping.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TokenClaimMappingApplyConfiguration represents an declarative configuration of the TokenClaimMapping type for use
+// with apply.
+type TokenClaimMappingApplyConfiguration struct {
+ Claim *string `json:"claim,omitempty"`
+}
+
+// TokenClaimMappingApplyConfiguration constructs an declarative configuration of the TokenClaimMapping type for use with
+// apply.
+func TokenClaimMapping() *TokenClaimMappingApplyConfiguration {
+ return &TokenClaimMappingApplyConfiguration{}
+}
+
+// WithClaim sets the Claim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claim field is set to the value of the last call.
+func (b *TokenClaimMappingApplyConfiguration) WithClaim(value string) *TokenClaimMappingApplyConfiguration {
+ b.Claim = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go
new file mode 100644
index 0000000000..1a2fdb0953
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimmappings.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TokenClaimMappingsApplyConfiguration represents an declarative configuration of the TokenClaimMappings type for use
+// with apply.
+type TokenClaimMappingsApplyConfiguration struct {
+ Username *UsernameClaimMappingApplyConfiguration `json:"username,omitempty"`
+ Groups *PrefixedClaimMappingApplyConfiguration `json:"groups,omitempty"`
+}
+
+// TokenClaimMappingsApplyConfiguration constructs an declarative configuration of the TokenClaimMappings type for use with
+// apply.
+func TokenClaimMappings() *TokenClaimMappingsApplyConfiguration {
+ return &TokenClaimMappingsApplyConfiguration{}
+}
+
+// WithUsername sets the Username field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Username field is set to the value of the last call.
+func (b *TokenClaimMappingsApplyConfiguration) WithUsername(value *UsernameClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration {
+ b.Username = value
+ return b
+}
+
+// WithGroups sets the Groups field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Groups field is set to the value of the last call.
+func (b *TokenClaimMappingsApplyConfiguration) WithGroups(value *PrefixedClaimMappingApplyConfiguration) *TokenClaimMappingsApplyConfiguration {
+ b.Groups = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go
new file mode 100644
index 0000000000..6793f93279
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenclaimvalidationrule.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// TokenClaimValidationRuleApplyConfiguration represents an declarative configuration of the TokenClaimValidationRule type for use
+// with apply.
+type TokenClaimValidationRuleApplyConfiguration struct {
+ Type *v1.TokenValidationRuleType `json:"type,omitempty"`
+ RequiredClaim *TokenRequiredClaimApplyConfiguration `json:"requiredClaim,omitempty"`
+}
+
+// TokenClaimValidationRuleApplyConfiguration constructs an declarative configuration of the TokenClaimValidationRule type for use with
+// apply.
+func TokenClaimValidationRule() *TokenClaimValidationRuleApplyConfiguration {
+ return &TokenClaimValidationRuleApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *TokenClaimValidationRuleApplyConfiguration) WithType(value v1.TokenValidationRuleType) *TokenClaimValidationRuleApplyConfiguration {
+ b.Type = &value
+ return b
+}
+
+// WithRequiredClaim sets the RequiredClaim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequiredClaim field is set to the value of the last call.
+func (b *TokenClaimValidationRuleApplyConfiguration) WithRequiredClaim(value *TokenRequiredClaimApplyConfiguration) *TokenClaimValidationRuleApplyConfiguration {
+ b.RequiredClaim = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go
new file mode 100644
index 0000000000..865d4ddbe1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenconfig.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TokenConfigApplyConfiguration represents an declarative configuration of the TokenConfig type for use
+// with apply.
+type TokenConfigApplyConfiguration struct {
+ AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+ AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+ AccessTokenInactivityTimeout *v1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+// TokenConfigApplyConfiguration constructs an declarative configuration of the TokenConfig type for use with
+// apply.
+func TokenConfig() *TokenConfigApplyConfiguration {
+ return &TokenConfigApplyConfiguration{}
+}
+
+// WithAccessTokenMaxAgeSeconds sets the AccessTokenMaxAgeSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AccessTokenMaxAgeSeconds field is set to the value of the last call.
+func (b *TokenConfigApplyConfiguration) WithAccessTokenMaxAgeSeconds(value int32) *TokenConfigApplyConfiguration {
+ b.AccessTokenMaxAgeSeconds = &value
+ return b
+}
+
+// WithAccessTokenInactivityTimeoutSeconds sets the AccessTokenInactivityTimeoutSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AccessTokenInactivityTimeoutSeconds field is set to the value of the last call.
+func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeoutSeconds(value int32) *TokenConfigApplyConfiguration {
+ b.AccessTokenInactivityTimeoutSeconds = &value
+ return b
+}
+
+// WithAccessTokenInactivityTimeout sets the AccessTokenInactivityTimeout field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AccessTokenInactivityTimeout field is set to the value of the last call.
+func (b *TokenConfigApplyConfiguration) WithAccessTokenInactivityTimeout(value v1.Duration) *TokenConfigApplyConfiguration {
+ b.AccessTokenInactivityTimeout = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go
new file mode 100644
index 0000000000..808e61a1db
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenissuer.go
@@ -0,0 +1,47 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// TokenIssuerApplyConfiguration represents an declarative configuration of the TokenIssuer type for use
+// with apply.
+type TokenIssuerApplyConfiguration struct {
+ URL *string `json:"issuerURL,omitempty"`
+ Audiences []v1.TokenAudience `json:"audiences,omitempty"`
+ CertificateAuthority *ConfigMapNameReferenceApplyConfiguration `json:"issuerCertificateAuthority,omitempty"`
+}
+
+// TokenIssuerApplyConfiguration constructs an declarative configuration of the TokenIssuer type for use with
+// apply.
+func TokenIssuer() *TokenIssuerApplyConfiguration {
+ return &TokenIssuerApplyConfiguration{}
+}
+
+// WithURL sets the URL field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the URL field is set to the value of the last call.
+func (b *TokenIssuerApplyConfiguration) WithURL(value string) *TokenIssuerApplyConfiguration {
+ b.URL = &value
+ return b
+}
+
+// WithAudiences adds the given value to the Audiences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Audiences field.
+func (b *TokenIssuerApplyConfiguration) WithAudiences(values ...v1.TokenAudience) *TokenIssuerApplyConfiguration {
+ for i := range values {
+ b.Audiences = append(b.Audiences, values[i])
+ }
+ return b
+}
+
+// WithCertificateAuthority sets the CertificateAuthority field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CertificateAuthority field is set to the value of the last call.
+func (b *TokenIssuerApplyConfiguration) WithCertificateAuthority(value *ConfigMapNameReferenceApplyConfiguration) *TokenIssuerApplyConfiguration {
+ b.CertificateAuthority = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go
new file mode 100644
index 0000000000..f7ae34d017
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/tokenrequiredclaim.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// TokenRequiredClaimApplyConfiguration represents an declarative configuration of the TokenRequiredClaim type for use
+// with apply.
+type TokenRequiredClaimApplyConfiguration struct {
+ Claim *string `json:"claim,omitempty"`
+ RequiredValue *string `json:"requiredValue,omitempty"`
+}
+
+// TokenRequiredClaimApplyConfiguration constructs an declarative configuration of the TokenRequiredClaim type for use with
+// apply.
+func TokenRequiredClaim() *TokenRequiredClaimApplyConfiguration {
+ return &TokenRequiredClaimApplyConfiguration{}
+}
+
+// WithClaim sets the Claim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claim field is set to the value of the last call.
+func (b *TokenRequiredClaimApplyConfiguration) WithClaim(value string) *TokenRequiredClaimApplyConfiguration {
+ b.Claim = &value
+ return b
+}
+
+// WithRequiredValue sets the RequiredValue field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RequiredValue field is set to the value of the last call.
+func (b *TokenRequiredClaimApplyConfiguration) WithRequiredValue(value string) *TokenRequiredClaimApplyConfiguration {
+ b.RequiredValue = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
new file mode 100644
index 0000000000..1f63851c24
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go
@@ -0,0 +1,54 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// UpdateApplyConfiguration represents an declarative configuration of the Update type for use
+// with apply.
+type UpdateApplyConfiguration struct {
+ Architecture *v1.ClusterVersionArchitecture `json:"architecture,omitempty"`
+ Version *string `json:"version,omitempty"`
+ Image *string `json:"image,omitempty"`
+ Force *bool `json:"force,omitempty"`
+}
+
+// UpdateApplyConfiguration constructs an declarative configuration of the Update type for use with
+// apply.
+func Update() *UpdateApplyConfiguration {
+ return &UpdateApplyConfiguration{}
+}
+
+// WithArchitecture sets the Architecture field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Architecture field is set to the value of the last call.
+func (b *UpdateApplyConfiguration) WithArchitecture(value v1.ClusterVersionArchitecture) *UpdateApplyConfiguration {
+ b.Architecture = &value
+ return b
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *UpdateApplyConfiguration) WithVersion(value string) *UpdateApplyConfiguration {
+ b.Version = &value
+ return b
+}
+
+// WithImage sets the Image field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Image field is set to the value of the last call.
+func (b *UpdateApplyConfiguration) WithImage(value string) *UpdateApplyConfiguration {
+ b.Image = &value
+ return b
+}
+
+// WithForce sets the Force field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Force field is set to the value of the last call.
+func (b *UpdateApplyConfiguration) WithForce(value bool) *UpdateApplyConfiguration {
+ b.Force = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go
new file mode 100644
index 0000000000..17dc2a0a10
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/updatehistory.go
@@ -0,0 +1,82 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// UpdateHistoryApplyConfiguration represents an declarative configuration of the UpdateHistory type for use
+// with apply.
+type UpdateHistoryApplyConfiguration struct {
+ State *v1.UpdateState `json:"state,omitempty"`
+ StartedTime *metav1.Time `json:"startedTime,omitempty"`
+ CompletionTime *metav1.Time `json:"completionTime,omitempty"`
+ Version *string `json:"version,omitempty"`
+ Image *string `json:"image,omitempty"`
+ Verified *bool `json:"verified,omitempty"`
+ AcceptedRisks *string `json:"acceptedRisks,omitempty"`
+}
+
+// UpdateHistoryApplyConfiguration constructs an declarative configuration of the UpdateHistory type for use with
+// apply.
+func UpdateHistory() *UpdateHistoryApplyConfiguration {
+ return &UpdateHistoryApplyConfiguration{}
+}
+
+// WithState sets the State field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the State field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithState(value v1.UpdateState) *UpdateHistoryApplyConfiguration {
+ b.State = &value
+ return b
+}
+
+// WithStartedTime sets the StartedTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the StartedTime field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithStartedTime(value metav1.Time) *UpdateHistoryApplyConfiguration {
+ b.StartedTime = &value
+ return b
+}
+
+// WithCompletionTime sets the CompletionTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CompletionTime field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithCompletionTime(value metav1.Time) *UpdateHistoryApplyConfiguration {
+ b.CompletionTime = &value
+ return b
+}
+
+// WithVersion sets the Version field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Version field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithVersion(value string) *UpdateHistoryApplyConfiguration {
+ b.Version = &value
+ return b
+}
+
+// WithImage sets the Image field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Image field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithImage(value string) *UpdateHistoryApplyConfiguration {
+ b.Image = &value
+ return b
+}
+
+// WithVerified sets the Verified field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Verified field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithVerified(value bool) *UpdateHistoryApplyConfiguration {
+ b.Verified = &value
+ return b
+}
+
+// WithAcceptedRisks sets the AcceptedRisks field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AcceptedRisks field is set to the value of the last call.
+func (b *UpdateHistoryApplyConfiguration) WithAcceptedRisks(value string) *UpdateHistoryApplyConfiguration {
+ b.AcceptedRisks = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go
new file mode 100644
index 0000000000..641fb48b28
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// UsernameClaimMappingApplyConfiguration represents an declarative configuration of the UsernameClaimMapping type for use
+// with apply.
+type UsernameClaimMappingApplyConfiguration struct {
+ TokenClaimMappingApplyConfiguration `json:",inline"`
+ PrefixPolicy *configv1.UsernamePrefixPolicy `json:"prefixPolicy,omitempty"`
+ Prefix *UsernamePrefixApplyConfiguration `json:"prefix,omitempty"`
+}
+
+// UsernameClaimMappingApplyConfiguration constructs an declarative configuration of the UsernameClaimMapping type for use with
+// apply.
+func UsernameClaimMapping() *UsernameClaimMappingApplyConfiguration {
+ return &UsernameClaimMappingApplyConfiguration{}
+}
+
+// WithClaim sets the Claim field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Claim field is set to the value of the last call.
+func (b *UsernameClaimMappingApplyConfiguration) WithClaim(value string) *UsernameClaimMappingApplyConfiguration {
+ b.Claim = &value
+ return b
+}
+
+// WithPrefixPolicy sets the PrefixPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrefixPolicy field is set to the value of the last call.
+func (b *UsernameClaimMappingApplyConfiguration) WithPrefixPolicy(value configv1.UsernamePrefixPolicy) *UsernameClaimMappingApplyConfiguration {
+ b.PrefixPolicy = &value
+ return b
+}
+
+// WithPrefix sets the Prefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Prefix field is set to the value of the last call.
+func (b *UsernameClaimMappingApplyConfiguration) WithPrefix(value *UsernamePrefixApplyConfiguration) *UsernameClaimMappingApplyConfiguration {
+ b.Prefix = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go
new file mode 100644
index 0000000000..b95bc9ba64
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameprefix.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// UsernamePrefixApplyConfiguration represents an declarative configuration of the UsernamePrefix type for use
+// with apply.
+type UsernamePrefixApplyConfiguration struct {
+ PrefixString *string `json:"prefixString,omitempty"`
+}
+
+// UsernamePrefixApplyConfiguration constructs an declarative configuration of the UsernamePrefix type for use with
+// apply.
+func UsernamePrefix() *UsernamePrefixApplyConfiguration {
+ return &UsernamePrefixApplyConfiguration{}
+}
+
+// WithPrefixString sets the PrefixString field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PrefixString field is set to the value of the last call.
+func (b *UsernamePrefixApplyConfiguration) WithPrefixString(value string) *UsernamePrefixApplyConfiguration {
+ b.PrefixString = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go
new file mode 100644
index 0000000000..0bad0fadf8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go
@@ -0,0 +1,59 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// VSpherePlatformFailureDomainSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformFailureDomainSpec type for use
+// with apply.
+type VSpherePlatformFailureDomainSpecApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Region *string `json:"region,omitempty"`
+ Zone *string `json:"zone,omitempty"`
+ Server *string `json:"server,omitempty"`
+ Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"`
+}
+
+// VSpherePlatformFailureDomainSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformFailureDomainSpec type for use with
+// apply.
+func VSpherePlatformFailureDomainSpec() *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ return &VSpherePlatformFailureDomainSpecApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithName(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithRegion sets the Region field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Region field is set to the value of the last call.
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegion(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ b.Region = &value
+ return b
+}
+
+// WithZone sets the Zone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Zone field is set to the value of the last call.
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZone(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ b.Zone = &value
+ return b
+}
+
+// WithServer sets the Server field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Server field is set to the value of the last call.
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithServer(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ b.Server = &value
+ return b
+}
+
+// WithTopology sets the Topology field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Topology field is set to the value of the last call.
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithTopology(value *VSpherePlatformTopologyApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration {
+ b.Topology = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go
new file mode 100644
index 0000000000..873f0289e8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+)
+
+// VSpherePlatformLoadBalancerApplyConfiguration represents an declarative configuration of the VSpherePlatformLoadBalancer type for use
+// with apply.
+type VSpherePlatformLoadBalancerApplyConfiguration struct {
+ Type *v1.PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
+// VSpherePlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the VSpherePlatformLoadBalancer type for use with
+// apply.
+func VSpherePlatformLoadBalancer() *VSpherePlatformLoadBalancerApplyConfiguration {
+ return &VSpherePlatformLoadBalancerApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *VSpherePlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *VSpherePlatformLoadBalancerApplyConfiguration {
+ b.Type = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go
new file mode 100644
index 0000000000..042737f1d8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// VSpherePlatformNodeNetworkingApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworking type for use
+// with apply.
+type VSpherePlatformNodeNetworkingApplyConfiguration struct {
+ External *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"external,omitempty"`
+ Internal *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"internal,omitempty"`
+}
+
+// VSpherePlatformNodeNetworkingApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworking type for use with
+// apply.
+func VSpherePlatformNodeNetworking() *VSpherePlatformNodeNetworkingApplyConfiguration {
+ return &VSpherePlatformNodeNetworkingApplyConfiguration{}
+}
+
+// WithExternal sets the External field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the External field is set to the value of the last call.
+func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithExternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration {
+ b.External = value
+ return b
+}
+
+// WithInternal sets the Internal field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Internal field is set to the value of the last call.
+func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithInternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration {
+ b.Internal = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go
new file mode 100644
index 0000000000..e13c42d64e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// VSpherePlatformNodeNetworkingSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use
+// with apply.
+type VSpherePlatformNodeNetworkingSpecApplyConfiguration struct {
+ NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"`
+ Network *string `json:"network,omitempty"`
+ ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"`
+}
+
+// VSpherePlatformNodeNetworkingSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use with
+// apply.
+func VSpherePlatformNodeNetworkingSpec() *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+ return &VSpherePlatformNodeNetworkingSpecApplyConfiguration{}
+}
+
+// WithNetworkSubnetCIDR adds the given value to the NetworkSubnetCIDR field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the NetworkSubnetCIDR field.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+ for i := range values {
+ b.NetworkSubnetCIDR = append(b.NetworkSubnetCIDR, values[i])
+ }
+ return b
+}
+
+// WithNetwork sets the Network field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Network field is set to the value of the last call.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetwork(value string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+ b.Network = &value
+ return b
+}
+
+// WithExcludeNetworkSubnetCIDR adds the given value to the ExcludeNetworkSubnetCIDR field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExcludeNetworkSubnetCIDR field.
+func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithExcludeNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration {
+ for i := range values {
+ b.ExcludeNetworkSubnetCIDR = append(b.ExcludeNetworkSubnetCIDR, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
new file mode 100644
index 0000000000..f51fc93d53
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go
@@ -0,0 +1,88 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// VSpherePlatformSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformSpec type for use
+// with apply.
+type VSpherePlatformSpecApplyConfiguration struct {
+ VCenters []VSpherePlatformVCenterSpecApplyConfiguration `json:"vcenters,omitempty"`
+ FailureDomains []VSpherePlatformFailureDomainSpecApplyConfiguration `json:"failureDomains,omitempty"`
+ NodeNetworking *VSpherePlatformNodeNetworkingApplyConfiguration `json:"nodeNetworking,omitempty"`
+ APIServerInternalIPs []configv1.IP `json:"apiServerInternalIPs,omitempty"`
+ IngressIPs []configv1.IP `json:"ingressIPs,omitempty"`
+ MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// VSpherePlatformSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformSpec type for use with
+// apply.
+func VSpherePlatformSpec() *VSpherePlatformSpecApplyConfiguration {
+ return &VSpherePlatformSpecApplyConfiguration{}
+}
+
+// WithVCenters adds the given value to the VCenters field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the VCenters field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithVCenters(values ...*VSpherePlatformVCenterSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithVCenters")
+ }
+ b.VCenters = append(b.VCenters, *values[i])
+ }
+ return b
+}
+
+// WithFailureDomains adds the given value to the FailureDomains field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FailureDomains field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithFailureDomains(values ...*VSpherePlatformFailureDomainSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithFailureDomains")
+ }
+ b.FailureDomains = append(b.FailureDomains, *values[i])
+ }
+ return b
+}
+
+// WithNodeNetworking sets the NodeNetworking field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeNetworking field is set to the value of the last call.
+func (b *VSpherePlatformSpecApplyConfiguration) WithNodeNetworking(value *VSpherePlatformNodeNetworkingApplyConfiguration) *VSpherePlatformSpecApplyConfiguration {
+ b.NodeNetworking = value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithAPIServerInternalIPs(values ...configv1.IP) *VSpherePlatformSpecApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithIngressIPs(values ...configv1.IP) *VSpherePlatformSpecApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *VSpherePlatformSpecApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *VSpherePlatformSpecApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go
new file mode 100644
index 0000000000..11a0420d5b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go
@@ -0,0 +1,87 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+// VSpherePlatformStatusApplyConfiguration represents an declarative configuration of the VSpherePlatformStatus type for use
+// with apply.
+type VSpherePlatformStatusApplyConfiguration struct {
+ APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"`
+ APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"`
+ IngressIP *string `json:"ingressIP,omitempty"`
+ IngressIPs []string `json:"ingressIPs,omitempty"`
+ NodeDNSIP *string `json:"nodeDNSIP,omitempty"`
+ LoadBalancer *VSpherePlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"`
+ MachineNetworks []configv1.CIDR `json:"machineNetworks,omitempty"`
+}
+
+// VSpherePlatformStatusApplyConfiguration constructs an declarative configuration of the VSpherePlatformStatus type for use with
+// apply.
+func VSpherePlatformStatus() *VSpherePlatformStatusApplyConfiguration {
+ return &VSpherePlatformStatusApplyConfiguration{}
+}
+
+// WithAPIServerInternalIP sets the APIServerInternalIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIServerInternalIP field is set to the value of the last call.
+func (b *VSpherePlatformStatusApplyConfiguration) WithAPIServerInternalIP(value string) *VSpherePlatformStatusApplyConfiguration {
+ b.APIServerInternalIP = &value
+ return b
+}
+
+// WithAPIServerInternalIPs adds the given value to the APIServerInternalIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIServerInternalIPs field.
+func (b *VSpherePlatformStatusApplyConfiguration) WithAPIServerInternalIPs(values ...string) *VSpherePlatformStatusApplyConfiguration {
+ for i := range values {
+ b.APIServerInternalIPs = append(b.APIServerInternalIPs, values[i])
+ }
+ return b
+}
+
+// WithIngressIP sets the IngressIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IngressIP field is set to the value of the last call.
+func (b *VSpherePlatformStatusApplyConfiguration) WithIngressIP(value string) *VSpherePlatformStatusApplyConfiguration {
+ b.IngressIP = &value
+ return b
+}
+
+// WithIngressIPs adds the given value to the IngressIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the IngressIPs field.
+func (b *VSpherePlatformStatusApplyConfiguration) WithIngressIPs(values ...string) *VSpherePlatformStatusApplyConfiguration {
+ for i := range values {
+ b.IngressIPs = append(b.IngressIPs, values[i])
+ }
+ return b
+}
+
+// WithNodeDNSIP sets the NodeDNSIP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeDNSIP field is set to the value of the last call.
+func (b *VSpherePlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *VSpherePlatformStatusApplyConfiguration {
+ b.NodeDNSIP = &value
+ return b
+}
+
+// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LoadBalancer field is set to the value of the last call.
+func (b *VSpherePlatformStatusApplyConfiguration) WithLoadBalancer(value *VSpherePlatformLoadBalancerApplyConfiguration) *VSpherePlatformStatusApplyConfiguration {
+ b.LoadBalancer = value
+ return b
+}
+
+// WithMachineNetworks adds the given value to the MachineNetworks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MachineNetworks field.
+func (b *VSpherePlatformStatusApplyConfiguration) WithMachineNetworks(values ...configv1.CIDR) *VSpherePlatformStatusApplyConfiguration {
+ for i := range values {
+ b.MachineNetworks = append(b.MachineNetworks, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go
new file mode 100644
index 0000000000..a16213812e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go
@@ -0,0 +1,79 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// VSpherePlatformTopologyApplyConfiguration represents an declarative configuration of the VSpherePlatformTopology type for use
+// with apply.
+type VSpherePlatformTopologyApplyConfiguration struct {
+ Datacenter *string `json:"datacenter,omitempty"`
+ ComputeCluster *string `json:"computeCluster,omitempty"`
+ Networks []string `json:"networks,omitempty"`
+ Datastore *string `json:"datastore,omitempty"`
+ ResourcePool *string `json:"resourcePool,omitempty"`
+ Folder *string `json:"folder,omitempty"`
+ Template *string `json:"template,omitempty"`
+}
+
+// VSpherePlatformTopologyApplyConfiguration constructs an declarative configuration of the VSpherePlatformTopology type for use with
+// apply.
+func VSpherePlatformTopology() *VSpherePlatformTopologyApplyConfiguration {
+ return &VSpherePlatformTopologyApplyConfiguration{}
+}
+
+// WithDatacenter sets the Datacenter field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Datacenter field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithDatacenter(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.Datacenter = &value
+ return b
+}
+
+// WithComputeCluster sets the ComputeCluster field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ComputeCluster field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithComputeCluster(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.ComputeCluster = &value
+ return b
+}
+
+// WithNetworks adds the given value to the Networks field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Networks field.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithNetworks(values ...string) *VSpherePlatformTopologyApplyConfiguration {
+ for i := range values {
+ b.Networks = append(b.Networks, values[i])
+ }
+ return b
+}
+
+// WithDatastore sets the Datastore field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Datastore field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithDatastore(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.Datastore = &value
+ return b
+}
+
+// WithResourcePool sets the ResourcePool field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourcePool field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithResourcePool(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.ResourcePool = &value
+ return b
+}
+
+// WithFolder sets the Folder field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Folder field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithFolder(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.Folder = &value
+ return b
+}
+
+// WithTemplate sets the Template field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Template field is set to the value of the last call.
+func (b *VSpherePlatformTopologyApplyConfiguration) WithTemplate(value string) *VSpherePlatformTopologyApplyConfiguration {
+ b.Template = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go
new file mode 100644
index 0000000000..59b2261c24
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go
@@ -0,0 +1,43 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// VSpherePlatformVCenterSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformVCenterSpec type for use
+// with apply.
+type VSpherePlatformVCenterSpecApplyConfiguration struct {
+ Server *string `json:"server,omitempty"`
+ Port *int32 `json:"port,omitempty"`
+ Datacenters []string `json:"datacenters,omitempty"`
+}
+
+// VSpherePlatformVCenterSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformVCenterSpec type for use with
+// apply.
+func VSpherePlatformVCenterSpec() *VSpherePlatformVCenterSpecApplyConfiguration {
+ return &VSpherePlatformVCenterSpecApplyConfiguration{}
+}
+
+// WithServer sets the Server field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Server field is set to the value of the last call.
+func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithServer(value string) *VSpherePlatformVCenterSpecApplyConfiguration {
+ b.Server = &value
+ return b
+}
+
+// WithPort sets the Port field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Port field is set to the value of the last call.
+func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithPort(value int32) *VSpherePlatformVCenterSpecApplyConfiguration {
+ b.Port = &value
+ return b
+}
+
+// WithDatacenters adds the given value to the Datacenters field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Datacenters field.
+func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithDatacenters(values ...string) *VSpherePlatformVCenterSpecApplyConfiguration {
+ for i := range values {
+ b.Datacenters = append(b.Datacenters, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go
new file mode 100644
index 0000000000..fc15008599
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/webhooktokenauthenticator.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// WebhookTokenAuthenticatorApplyConfiguration represents an declarative configuration of the WebhookTokenAuthenticator type for use
+// with apply.
+type WebhookTokenAuthenticatorApplyConfiguration struct {
+ KubeConfig *SecretNameReferenceApplyConfiguration `json:"kubeConfig,omitempty"`
+}
+
+// WebhookTokenAuthenticatorApplyConfiguration constructs an declarative configuration of the WebhookTokenAuthenticator type for use with
+// apply.
+func WebhookTokenAuthenticator() *WebhookTokenAuthenticatorApplyConfiguration {
+ return &WebhookTokenAuthenticatorApplyConfiguration{}
+}
+
+// WithKubeConfig sets the KubeConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the KubeConfig field is set to the value of the last call.
+func (b *WebhookTokenAuthenticatorApplyConfiguration) WithKubeConfig(value *SecretNameReferenceApplyConfiguration) *WebhookTokenAuthenticatorApplyConfiguration {
+ b.KubeConfig = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go
new file mode 100644
index 0000000000..d06f12e509
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// BackupApplyConfiguration represents an declarative configuration of the Backup type for use
+// with apply.
+type BackupApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *BackupSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *configv1alpha1.BackupStatus `json:"status,omitempty"`
+}
+
+// Backup constructs an declarative configuration of the Backup type for use with
+// apply.
+func Backup(name string) *BackupApplyConfiguration {
+ b := &BackupApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("Backup")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b
+}
+
+// ExtractBackup extracts the applied configuration owned by fieldManager from
+// backup. If no managedFields are found in backup for fieldManager, a
+// BackupApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// backup must be a unmodified Backup API object that was retrieved from the Kubernetes API.
+// ExtractBackup provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractBackup(backup *configv1alpha1.Backup, fieldManager string) (*BackupApplyConfiguration, error) {
+ return extractBackup(backup, fieldManager, "")
+}
+
+// ExtractBackupStatus is the same as ExtractBackup except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractBackupStatus(backup *configv1alpha1.Backup, fieldManager string) (*BackupApplyConfiguration, error) {
+ return extractBackup(backup, fieldManager, "status")
+}
+
+func extractBackup(backup *configv1alpha1.Backup, fieldManager string, subresource string) (*BackupApplyConfiguration, error) {
+ b := &BackupApplyConfiguration{}
+ err := managedfields.ExtractInto(backup, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.Backup"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(backup.Name)
+
+ b.WithKind("Backup")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithKind(value string) *BackupApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithAPIVersion(value string) *BackupApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithName(value string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithGenerateName(value string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithNamespace(value string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithUID(value types.UID) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithResourceVersion(value string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithGeneration(value int64) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *BackupApplyConfiguration) WithLabels(entries map[string]string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *BackupApplyConfiguration) WithAnnotations(entries map[string]string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *BackupApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *BackupApplyConfiguration) WithFinalizers(values ...string) *BackupApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *BackupApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithSpec(value *BackupSpecApplyConfiguration) *BackupApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *BackupApplyConfiguration) WithStatus(value configv1alpha1.BackupStatus) *BackupApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go
new file mode 100644
index 0000000000..8ecb7e8136
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backupspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// BackupSpecApplyConfiguration represents an declarative configuration of the BackupSpec type for use
+// with apply.
+type BackupSpecApplyConfiguration struct {
+ EtcdBackupSpec *EtcdBackupSpecApplyConfiguration `json:"etcd,omitempty"`
+}
+
+// BackupSpecApplyConfiguration constructs an declarative configuration of the BackupSpec type for use with
+// apply.
+func BackupSpec() *BackupSpecApplyConfiguration {
+ return &BackupSpecApplyConfiguration{}
+}
+
+// WithEtcdBackupSpec sets the EtcdBackupSpec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the EtcdBackupSpec field is set to the value of the last call.
+func (b *BackupSpecApplyConfiguration) WithEtcdBackupSpec(value *EtcdBackupSpecApplyConfiguration) *BackupSpecApplyConfiguration {
+ b.EtcdBackupSpec = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go
new file mode 100644
index 0000000000..68089a6a72
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterImagePolicyApplyConfiguration represents an declarative configuration of the ClusterImagePolicy type for use
+// with apply.
+type ClusterImagePolicyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ClusterImagePolicySpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ClusterImagePolicyStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ClusterImagePolicy constructs an declarative configuration of the ClusterImagePolicy type for use with
+// apply.
+func ClusterImagePolicy(name string) *ClusterImagePolicyApplyConfiguration {
+ b := &ClusterImagePolicyApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ClusterImagePolicy")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b
+}
+
+// ExtractClusterImagePolicy extracts the applied configuration owned by fieldManager from
+// clusterImagePolicy. If no managedFields are found in clusterImagePolicy for fieldManager, a
+// ClusterImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// clusterImagePolicy must be a unmodified ClusterImagePolicy API object that was retrieved from the Kubernetes API.
+// ExtractClusterImagePolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) {
+ return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "")
+}
+
+// ExtractClusterImagePolicyStatus is the same as ExtractClusterImagePolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterImagePolicyStatus(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string) (*ClusterImagePolicyApplyConfiguration, error) {
+ return extractClusterImagePolicy(clusterImagePolicy, fieldManager, "status")
+}
+
+func extractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePolicy, fieldManager string, subresource string) (*ClusterImagePolicyApplyConfiguration, error) {
+ b := &ClusterImagePolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(clusterImagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.ClusterImagePolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(clusterImagePolicy.Name)
+
+ b.WithKind("ClusterImagePolicy")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithKind(value string) *ClusterImagePolicyApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithAPIVersion(value string) *ClusterImagePolicyApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithName(value string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithGenerateName(value string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithNamespace(value string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithUID(value types.UID) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithResourceVersion(value string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithGeneration(value int64) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ClusterImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ClusterImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ClusterImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ClusterImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ClusterImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ClusterImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithSpec(value *ClusterImagePolicySpecApplyConfiguration) *ClusterImagePolicyApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePolicyStatusApplyConfiguration) *ClusterImagePolicyApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go
new file mode 100644
index 0000000000..64dfa92dd6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicyspec.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// ClusterImagePolicySpecApplyConfiguration represents an declarative configuration of the ClusterImagePolicySpec type for use
+// with apply.
+type ClusterImagePolicySpecApplyConfiguration struct {
+ Scopes []v1alpha1.ImageScope `json:"scopes,omitempty"`
+ Policy *PolicyApplyConfiguration `json:"policy,omitempty"`
+}
+
+// ClusterImagePolicySpecApplyConfiguration constructs an declarative configuration of the ClusterImagePolicySpec type for use with
+// apply.
+func ClusterImagePolicySpec() *ClusterImagePolicySpecApplyConfiguration {
+ return &ClusterImagePolicySpecApplyConfiguration{}
+}
+
+// WithScopes adds the given value to the Scopes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Scopes field.
+func (b *ClusterImagePolicySpecApplyConfiguration) WithScopes(values ...v1alpha1.ImageScope) *ClusterImagePolicySpecApplyConfiguration {
+ for i := range values {
+ b.Scopes = append(b.Scopes, values[i])
+ }
+ return b
+}
+
+// WithPolicy sets the Policy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Policy field is set to the value of the last call.
+func (b *ClusterImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ClusterImagePolicySpecApplyConfiguration {
+ b.Policy = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go
new file mode 100644
index 0000000000..0b5ea75358
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicystatus.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ClusterImagePolicyStatusApplyConfiguration represents an declarative configuration of the ClusterImagePolicyStatus type for use
+// with apply.
+type ClusterImagePolicyStatusApplyConfiguration struct {
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// ClusterImagePolicyStatusApplyConfiguration constructs an declarative configuration of the ClusterImagePolicyStatus type for use with
+// apply.
+func ClusterImagePolicyStatus() *ClusterImagePolicyStatusApplyConfiguration {
+ return &ClusterImagePolicyStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ClusterImagePolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ClusterImagePolicyStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go
new file mode 100644
index 0000000000..4255313b1a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/etcdbackupspec.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// EtcdBackupSpecApplyConfiguration represents an declarative configuration of the EtcdBackupSpec type for use
+// with apply.
+type EtcdBackupSpecApplyConfiguration struct {
+ Schedule *string `json:"schedule,omitempty"`
+ TimeZone *string `json:"timeZone,omitempty"`
+ RetentionPolicy *RetentionPolicyApplyConfiguration `json:"retentionPolicy,omitempty"`
+ PVCName *string `json:"pvcName,omitempty"`
+}
+
+// EtcdBackupSpecApplyConfiguration constructs an declarative configuration of the EtcdBackupSpec type for use with
+// apply.
+func EtcdBackupSpec() *EtcdBackupSpecApplyConfiguration {
+ return &EtcdBackupSpecApplyConfiguration{}
+}
+
+// WithSchedule sets the Schedule field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Schedule field is set to the value of the last call.
+func (b *EtcdBackupSpecApplyConfiguration) WithSchedule(value string) *EtcdBackupSpecApplyConfiguration {
+ b.Schedule = &value
+ return b
+}
+
+// WithTimeZone sets the TimeZone field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TimeZone field is set to the value of the last call.
+func (b *EtcdBackupSpecApplyConfiguration) WithTimeZone(value string) *EtcdBackupSpecApplyConfiguration {
+ b.TimeZone = &value
+ return b
+}
+
+// WithRetentionPolicy sets the RetentionPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RetentionPolicy field is set to the value of the last call.
+func (b *EtcdBackupSpecApplyConfiguration) WithRetentionPolicy(value *RetentionPolicyApplyConfiguration) *EtcdBackupSpecApplyConfiguration {
+ b.RetentionPolicy = value
+ return b
+}
+
+// WithPVCName sets the PVCName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PVCName field is set to the value of the last call.
+func (b *EtcdBackupSpecApplyConfiguration) WithPVCName(value string) *EtcdBackupSpecApplyConfiguration {
+ b.PVCName = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go
new file mode 100644
index 0000000000..681d1ce165
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/fulciocawithrekor.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// FulcioCAWithRekorApplyConfiguration represents an declarative configuration of the FulcioCAWithRekor type for use
+// with apply.
+type FulcioCAWithRekorApplyConfiguration struct {
+ FulcioCAData []byte `json:"fulcioCAData,omitempty"`
+ RekorKeyData []byte `json:"rekorKeyData,omitempty"`
+ FulcioSubject *PolicyFulcioSubjectApplyConfiguration `json:"fulcioSubject,omitempty"`
+}
+
+// FulcioCAWithRekorApplyConfiguration constructs an declarative configuration of the FulcioCAWithRekor type for use with
+// apply.
+func FulcioCAWithRekor() *FulcioCAWithRekorApplyConfiguration {
+ return &FulcioCAWithRekorApplyConfiguration{}
+}
+
+// WithFulcioCAData adds the given value to the FulcioCAData field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the FulcioCAData field.
+func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioCAData(values ...byte) *FulcioCAWithRekorApplyConfiguration {
+ for i := range values {
+ b.FulcioCAData = append(b.FulcioCAData, values[i])
+ }
+ return b
+}
+
+// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RekorKeyData field.
+func (b *FulcioCAWithRekorApplyConfiguration) WithRekorKeyData(values ...byte) *FulcioCAWithRekorApplyConfiguration {
+ for i := range values {
+ b.RekorKeyData = append(b.RekorKeyData, values[i])
+ }
+ return b
+}
+
+// WithFulcioSubject sets the FulcioSubject field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FulcioSubject field is set to the value of the last call.
+func (b *FulcioCAWithRekorApplyConfiguration) WithFulcioSubject(value *PolicyFulcioSubjectApplyConfiguration) *FulcioCAWithRekorApplyConfiguration {
+ b.FulcioSubject = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go
new file mode 100644
index 0000000000..2eec8ffd27
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// GatherConfigApplyConfiguration represents an declarative configuration of the GatherConfig type for use
+// with apply.
+type GatherConfigApplyConfiguration struct {
+ DataPolicy *v1alpha1.DataPolicy `json:"dataPolicy,omitempty"`
+ DisabledGatherers []string `json:"disabledGatherers,omitempty"`
+}
+
+// GatherConfigApplyConfiguration constructs an declarative configuration of the GatherConfig type for use with
+// apply.
+func GatherConfig() *GatherConfigApplyConfiguration {
+ return &GatherConfigApplyConfiguration{}
+}
+
+// WithDataPolicy sets the DataPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DataPolicy field is set to the value of the last call.
+func (b *GatherConfigApplyConfiguration) WithDataPolicy(value v1alpha1.DataPolicy) *GatherConfigApplyConfiguration {
+ b.DataPolicy = &value
+ return b
+}
+
+// WithDisabledGatherers adds the given value to the DisabledGatherers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the DisabledGatherers field.
+func (b *GatherConfigApplyConfiguration) WithDisabledGatherers(values ...string) *GatherConfigApplyConfiguration {
+ for i := range values {
+ b.DisabledGatherers = append(b.DisabledGatherers, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go
new file mode 100644
index 0000000000..412a28fc71
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go
@@ -0,0 +1,242 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImagePolicyApplyConfiguration represents an declarative configuration of the ImagePolicy type for use
+// with apply.
+type ImagePolicyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ImagePolicySpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ImagePolicyStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ImagePolicy constructs an declarative configuration of the ImagePolicy type for use with
+// apply.
+func ImagePolicy(name, namespace string) *ImagePolicyApplyConfiguration {
+ b := &ImagePolicyApplyConfiguration{}
+ b.WithName(name)
+ b.WithNamespace(namespace)
+ b.WithKind("ImagePolicy")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b
+}
+
+// ExtractImagePolicy extracts the applied configuration owned by fieldManager from
+// imagePolicy. If no managedFields are found in imagePolicy for fieldManager, a
+// ImagePolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// imagePolicy must be a unmodified ImagePolicy API object that was retrieved from the Kubernetes API.
+// ExtractImagePolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) {
+ return extractImagePolicy(imagePolicy, fieldManager, "")
+}
+
+// ExtractImagePolicyStatus is the same as ExtractImagePolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractImagePolicyStatus(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string) (*ImagePolicyApplyConfiguration, error) {
+ return extractImagePolicy(imagePolicy, fieldManager, "status")
+}
+
+func extractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager string, subresource string) (*ImagePolicyApplyConfiguration, error) {
+ b := &ImagePolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(imagePolicy, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.ImagePolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(imagePolicy.Name)
+ b.WithNamespace(imagePolicy.Namespace)
+
+ b.WithKind("ImagePolicy")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithKind(value string) *ImagePolicyApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithAPIVersion(value string) *ImagePolicyApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithName(value string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithGenerateName(value string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithNamespace(value string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithUID(value types.UID) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithResourceVersion(value string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithGeneration(value int64) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ImagePolicyApplyConfiguration) WithLabels(entries map[string]string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ImagePolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ImagePolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ImagePolicyApplyConfiguration) WithFinalizers(values ...string) *ImagePolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ImagePolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithSpec(value *ImagePolicySpecApplyConfiguration) *ImagePolicyApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApplyConfiguration) *ImagePolicyApplyConfiguration {
+ b.Status = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go
new file mode 100644
index 0000000000..aecf932a70
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicyspec.go
@@ -0,0 +1,38 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// ImagePolicySpecApplyConfiguration represents an declarative configuration of the ImagePolicySpec type for use
+// with apply.
+type ImagePolicySpecApplyConfiguration struct {
+ Scopes []v1alpha1.ImageScope `json:"scopes,omitempty"`
+ Policy *PolicyApplyConfiguration `json:"policy,omitempty"`
+}
+
+// ImagePolicySpecApplyConfiguration constructs an declarative configuration of the ImagePolicySpec type for use with
+// apply.
+func ImagePolicySpec() *ImagePolicySpecApplyConfiguration {
+ return &ImagePolicySpecApplyConfiguration{}
+}
+
+// WithScopes adds the given value to the Scopes field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Scopes field.
+func (b *ImagePolicySpecApplyConfiguration) WithScopes(values ...v1alpha1.ImageScope) *ImagePolicySpecApplyConfiguration {
+ for i := range values {
+ b.Scopes = append(b.Scopes, values[i])
+ }
+ return b
+}
+
+// WithPolicy sets the Policy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Policy field is set to the value of the last call.
+func (b *ImagePolicySpecApplyConfiguration) WithPolicy(value *PolicyApplyConfiguration) *ImagePolicySpecApplyConfiguration {
+ b.Policy = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go
new file mode 100644
index 0000000000..d20853ae7d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicystatus.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ImagePolicyStatusApplyConfiguration represents an declarative configuration of the ImagePolicyStatus type for use
+// with apply.
+type ImagePolicyStatusApplyConfiguration struct {
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// ImagePolicyStatusApplyConfiguration constructs an declarative configuration of the ImagePolicyStatus type for use with
+// apply.
+func ImagePolicyStatus() *ImagePolicyStatusApplyConfiguration {
+ return &ImagePolicyStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ImagePolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ImagePolicyStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go
new file mode 100644
index 0000000000..b86f19208e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go
@@ -0,0 +1,240 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ internal "github.com/openshift/client-go/config/applyconfigurations/internal"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// InsightsDataGatherApplyConfiguration represents an declarative configuration of the InsightsDataGather type for use
+// with apply.
+type InsightsDataGatherApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *configv1alpha1.InsightsDataGatherStatus `json:"status,omitempty"`
+}
+
+// InsightsDataGather constructs an declarative configuration of the InsightsDataGather type for use with
+// apply.
+func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration {
+ b := &InsightsDataGatherApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("InsightsDataGather")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b
+}
+
+// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from
+// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, a
+// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// insightsDataGather must be a unmodified InsightsDataGather API object that was retrieved from the Kubernetes API.
+// ExtractInsightsDataGather provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) {
+ return extractInsightsDataGather(insightsDataGather, fieldManager, "")
+}
+
+// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractInsightsDataGatherStatus(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) {
+ return extractInsightsDataGather(insightsDataGather, fieldManager, "status")
+}
+
+func extractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) {
+ b := &InsightsDataGatherApplyConfiguration{}
+ err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.InsightsDataGather"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(insightsDataGather.Name)
+
+ b.WithKind("InsightsDataGather")
+ b.WithAPIVersion("config.openshift.io/v1alpha1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha1.InsightsDataGatherStatus) *InsightsDataGatherApplyConfiguration {
+ b.Status = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go
new file mode 100644
index 0000000000..44416cf858
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// InsightsDataGatherSpecApplyConfiguration represents an declarative configuration of the InsightsDataGatherSpec type for use
+// with apply.
+type InsightsDataGatherSpecApplyConfiguration struct {
+ GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"`
+}
+
+// InsightsDataGatherSpecApplyConfiguration constructs an declarative configuration of the InsightsDataGatherSpec type for use with
+// apply.
+func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration {
+ return &InsightsDataGatherSpecApplyConfiguration{}
+}
+
+// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GatherConfig field is set to the value of the last call.
+func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration {
+ b.GatherConfig = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go
new file mode 100644
index 0000000000..f4697d2bbb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policy.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// PolicyApplyConfiguration represents an declarative configuration of the Policy type for use
+// with apply.
+type PolicyApplyConfiguration struct {
+ RootOfTrust *PolicyRootOfTrustApplyConfiguration `json:"rootOfTrust,omitempty"`
+ SignedIdentity *PolicyIdentityApplyConfiguration `json:"signedIdentity,omitempty"`
+}
+
+// PolicyApplyConfiguration constructs an declarative configuration of the Policy type for use with
+// apply.
+func Policy() *PolicyApplyConfiguration {
+ return &PolicyApplyConfiguration{}
+}
+
+// WithRootOfTrust sets the RootOfTrust field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RootOfTrust field is set to the value of the last call.
+func (b *PolicyApplyConfiguration) WithRootOfTrust(value *PolicyRootOfTrustApplyConfiguration) *PolicyApplyConfiguration {
+ b.RootOfTrust = value
+ return b
+}
+
+// WithSignedIdentity sets the SignedIdentity field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SignedIdentity field is set to the value of the last call.
+func (b *PolicyApplyConfiguration) WithSignedIdentity(value *PolicyIdentityApplyConfiguration) *PolicyApplyConfiguration {
+ b.SignedIdentity = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go
new file mode 100644
index 0000000000..98b0db5904
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyfulciosubject.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// PolicyFulcioSubjectApplyConfiguration represents an declarative configuration of the PolicyFulcioSubject type for use
+// with apply.
+type PolicyFulcioSubjectApplyConfiguration struct {
+ OIDCIssuer *string `json:"oidcIssuer,omitempty"`
+ SignedEmail *string `json:"signedEmail,omitempty"`
+}
+
+// PolicyFulcioSubjectApplyConfiguration constructs an declarative configuration of the PolicyFulcioSubject type for use with
+// apply.
+func PolicyFulcioSubject() *PolicyFulcioSubjectApplyConfiguration {
+ return &PolicyFulcioSubjectApplyConfiguration{}
+}
+
+// WithOIDCIssuer sets the OIDCIssuer field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OIDCIssuer field is set to the value of the last call.
+func (b *PolicyFulcioSubjectApplyConfiguration) WithOIDCIssuer(value string) *PolicyFulcioSubjectApplyConfiguration {
+ b.OIDCIssuer = &value
+ return b
+}
+
+// WithSignedEmail sets the SignedEmail field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SignedEmail field is set to the value of the last call.
+func (b *PolicyFulcioSubjectApplyConfiguration) WithSignedEmail(value string) *PolicyFulcioSubjectApplyConfiguration {
+ b.SignedEmail = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go
new file mode 100644
index 0000000000..ef4769d02d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyidentity.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// PolicyIdentityApplyConfiguration represents an declarative configuration of the PolicyIdentity type for use
+// with apply.
+type PolicyIdentityApplyConfiguration struct {
+ MatchPolicy *v1alpha1.IdentityMatchPolicy `json:"matchPolicy,omitempty"`
+ PolicyMatchExactRepository *PolicyMatchExactRepositoryApplyConfiguration `json:"exactRepository,omitempty"`
+ PolicyMatchRemapIdentity *PolicyMatchRemapIdentityApplyConfiguration `json:"remapIdentity,omitempty"`
+}
+
+// PolicyIdentityApplyConfiguration constructs an declarative configuration of the PolicyIdentity type for use with
+// apply.
+func PolicyIdentity() *PolicyIdentityApplyConfiguration {
+ return &PolicyIdentityApplyConfiguration{}
+}
+
+// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchPolicy field is set to the value of the last call.
+func (b *PolicyIdentityApplyConfiguration) WithMatchPolicy(value v1alpha1.IdentityMatchPolicy) *PolicyIdentityApplyConfiguration {
+ b.MatchPolicy = &value
+ return b
+}
+
+// WithPolicyMatchExactRepository sets the PolicyMatchExactRepository field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PolicyMatchExactRepository field is set to the value of the last call.
+func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchExactRepository(value *PolicyMatchExactRepositoryApplyConfiguration) *PolicyIdentityApplyConfiguration {
+ b.PolicyMatchExactRepository = value
+ return b
+}
+
+// WithPolicyMatchRemapIdentity sets the PolicyMatchRemapIdentity field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PolicyMatchRemapIdentity field is set to the value of the last call.
+func (b *PolicyIdentityApplyConfiguration) WithPolicyMatchRemapIdentity(value *PolicyMatchRemapIdentityApplyConfiguration) *PolicyIdentityApplyConfiguration {
+ b.PolicyMatchRemapIdentity = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go
new file mode 100644
index 0000000000..6bf36d733e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchexactrepository.go
@@ -0,0 +1,27 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// PolicyMatchExactRepositoryApplyConfiguration represents an declarative configuration of the PolicyMatchExactRepository type for use
+// with apply.
+type PolicyMatchExactRepositoryApplyConfiguration struct {
+ Repository *v1alpha1.IdentityRepositoryPrefix `json:"repository,omitempty"`
+}
+
+// PolicyMatchExactRepositoryApplyConfiguration constructs an declarative configuration of the PolicyMatchExactRepository type for use with
+// apply.
+func PolicyMatchExactRepository() *PolicyMatchExactRepositoryApplyConfiguration {
+ return &PolicyMatchExactRepositoryApplyConfiguration{}
+}
+
+// WithRepository sets the Repository field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Repository field is set to the value of the last call.
+func (b *PolicyMatchExactRepositoryApplyConfiguration) WithRepository(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchExactRepositoryApplyConfiguration {
+ b.Repository = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go
new file mode 100644
index 0000000000..20ecf141ca
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policymatchremapidentity.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// PolicyMatchRemapIdentityApplyConfiguration represents an declarative configuration of the PolicyMatchRemapIdentity type for use
+// with apply.
+type PolicyMatchRemapIdentityApplyConfiguration struct {
+ Prefix *v1alpha1.IdentityRepositoryPrefix `json:"prefix,omitempty"`
+ SignedPrefix *v1alpha1.IdentityRepositoryPrefix `json:"signedPrefix,omitempty"`
+}
+
+// PolicyMatchRemapIdentityApplyConfiguration constructs an declarative configuration of the PolicyMatchRemapIdentity type for use with
+// apply.
+func PolicyMatchRemapIdentity() *PolicyMatchRemapIdentityApplyConfiguration {
+ return &PolicyMatchRemapIdentityApplyConfiguration{}
+}
+
+// WithPrefix sets the Prefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Prefix field is set to the value of the last call.
+func (b *PolicyMatchRemapIdentityApplyConfiguration) WithPrefix(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
+ b.Prefix = &value
+ return b
+}
+
+// WithSignedPrefix sets the SignedPrefix field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SignedPrefix field is set to the value of the last call.
+func (b *PolicyMatchRemapIdentityApplyConfiguration) WithSignedPrefix(value v1alpha1.IdentityRepositoryPrefix) *PolicyMatchRemapIdentityApplyConfiguration {
+ b.SignedPrefix = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go
new file mode 100644
index 0000000000..cc442ddaea
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/policyrootoftrust.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// PolicyRootOfTrustApplyConfiguration represents an declarative configuration of the PolicyRootOfTrust type for use
+// with apply.
+type PolicyRootOfTrustApplyConfiguration struct {
+ PolicyType *v1alpha1.PolicyType `json:"policyType,omitempty"`
+ PublicKey *PublicKeyApplyConfiguration `json:"publicKey,omitempty"`
+ FulcioCAWithRekor *FulcioCAWithRekorApplyConfiguration `json:"fulcioCAWithRekor,omitempty"`
+}
+
+// PolicyRootOfTrustApplyConfiguration constructs an declarative configuration of the PolicyRootOfTrust type for use with
+// apply.
+func PolicyRootOfTrust() *PolicyRootOfTrustApplyConfiguration {
+ return &PolicyRootOfTrustApplyConfiguration{}
+}
+
+// WithPolicyType sets the PolicyType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PolicyType field is set to the value of the last call.
+func (b *PolicyRootOfTrustApplyConfiguration) WithPolicyType(value v1alpha1.PolicyType) *PolicyRootOfTrustApplyConfiguration {
+ b.PolicyType = &value
+ return b
+}
+
+// WithPublicKey sets the PublicKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PublicKey field is set to the value of the last call.
+func (b *PolicyRootOfTrustApplyConfiguration) WithPublicKey(value *PublicKeyApplyConfiguration) *PolicyRootOfTrustApplyConfiguration {
+ b.PublicKey = value
+ return b
+}
+
+// WithFulcioCAWithRekor sets the FulcioCAWithRekor field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FulcioCAWithRekor field is set to the value of the last call.
+func (b *PolicyRootOfTrustApplyConfiguration) WithFulcioCAWithRekor(value *FulcioCAWithRekorApplyConfiguration) *PolicyRootOfTrustApplyConfiguration {
+ b.FulcioCAWithRekor = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go
new file mode 100644
index 0000000000..0636a283ef
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/publickey.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// PublicKeyApplyConfiguration represents an declarative configuration of the PublicKey type for use
+// with apply.
+type PublicKeyApplyConfiguration struct {
+ KeyData []byte `json:"keyData,omitempty"`
+ RekorKeyData []byte `json:"rekorKeyData,omitempty"`
+}
+
+// PublicKeyApplyConfiguration constructs an declarative configuration of the PublicKey type for use with
+// apply.
+func PublicKey() *PublicKeyApplyConfiguration {
+ return &PublicKeyApplyConfiguration{}
+}
+
+// WithKeyData adds the given value to the KeyData field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the KeyData field.
+func (b *PublicKeyApplyConfiguration) WithKeyData(values ...byte) *PublicKeyApplyConfiguration {
+ for i := range values {
+ b.KeyData = append(b.KeyData, values[i])
+ }
+ return b
+}
+
+// WithRekorKeyData adds the given value to the RekorKeyData field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the RekorKeyData field.
+func (b *PublicKeyApplyConfiguration) WithRekorKeyData(values ...byte) *PublicKeyApplyConfiguration {
+ for i := range values {
+ b.RekorKeyData = append(b.RekorKeyData, values[i])
+ }
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go
new file mode 100644
index 0000000000..833c540eca
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionnumberconfig.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// RetentionNumberConfigApplyConfiguration represents an declarative configuration of the RetentionNumberConfig type for use
+// with apply.
+type RetentionNumberConfigApplyConfiguration struct {
+ MaxNumberOfBackups *int `json:"maxNumberOfBackups,omitempty"`
+}
+
+// RetentionNumberConfigApplyConfiguration constructs an declarative configuration of the RetentionNumberConfig type for use with
+// apply.
+func RetentionNumberConfig() *RetentionNumberConfigApplyConfiguration {
+ return &RetentionNumberConfigApplyConfiguration{}
+}
+
+// WithMaxNumberOfBackups sets the MaxNumberOfBackups field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxNumberOfBackups field is set to the value of the last call.
+func (b *RetentionNumberConfigApplyConfiguration) WithMaxNumberOfBackups(value int) *RetentionNumberConfigApplyConfiguration {
+ b.MaxNumberOfBackups = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go
new file mode 100644
index 0000000000..7d0de95c97
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionpolicy.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+)
+
+// RetentionPolicyApplyConfiguration represents an declarative configuration of the RetentionPolicy type for use
+// with apply.
+type RetentionPolicyApplyConfiguration struct {
+ RetentionType *v1alpha1.RetentionType `json:"retentionType,omitempty"`
+ RetentionNumber *RetentionNumberConfigApplyConfiguration `json:"retentionNumber,omitempty"`
+ RetentionSize *RetentionSizeConfigApplyConfiguration `json:"retentionSize,omitempty"`
+}
+
+// RetentionPolicyApplyConfiguration constructs an declarative configuration of the RetentionPolicy type for use with
+// apply.
+func RetentionPolicy() *RetentionPolicyApplyConfiguration {
+ return &RetentionPolicyApplyConfiguration{}
+}
+
+// WithRetentionType sets the RetentionType field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RetentionType field is set to the value of the last call.
+func (b *RetentionPolicyApplyConfiguration) WithRetentionType(value v1alpha1.RetentionType) *RetentionPolicyApplyConfiguration {
+ b.RetentionType = &value
+ return b
+}
+
+// WithRetentionNumber sets the RetentionNumber field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RetentionNumber field is set to the value of the last call.
+func (b *RetentionPolicyApplyConfiguration) WithRetentionNumber(value *RetentionNumberConfigApplyConfiguration) *RetentionPolicyApplyConfiguration {
+ b.RetentionNumber = value
+ return b
+}
+
+// WithRetentionSize sets the RetentionSize field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RetentionSize field is set to the value of the last call.
+func (b *RetentionPolicyApplyConfiguration) WithRetentionSize(value *RetentionSizeConfigApplyConfiguration) *RetentionPolicyApplyConfiguration {
+ b.RetentionSize = value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go
new file mode 100644
index 0000000000..50519b53a8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/retentionsizeconfig.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// RetentionSizeConfigApplyConfiguration represents an declarative configuration of the RetentionSizeConfig type for use
+// with apply.
+type RetentionSizeConfigApplyConfiguration struct {
+ MaxSizeOfBackupsGb *int `json:"maxSizeOfBackupsGb,omitempty"`
+}
+
+// RetentionSizeConfigApplyConfiguration constructs an declarative configuration of the RetentionSizeConfig type for use with
+// apply.
+func RetentionSizeConfig() *RetentionSizeConfigApplyConfiguration {
+ return &RetentionSizeConfigApplyConfiguration{}
+}
+
+// WithMaxSizeOfBackupsGb sets the MaxSizeOfBackupsGb field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxSizeOfBackupsGb field is set to the value of the last call.
+func (b *RetentionSizeConfigApplyConfiguration) WithMaxSizeOfBackupsGb(value int) *RetentionSizeConfigApplyConfiguration {
+ b.MaxSizeOfBackupsGb = &value
+ return b
+}
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
new file mode 100644
index 0000000000..7e14fb8c97
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go
@@ -0,0 +1,4255 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ typed "sigs.k8s.io/structured-merge-diff/v4/typed"
+)
+
+func Parser() *typed.Parser {
+ parserOnce.Do(func() {
+ var err error
+ parser, err = typed.NewParser(schemaYAML)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse schema: %v", err))
+ }
+ })
+ return parser
+}
+
+var parserOnce sync.Once
+var parser *typed.Parser
+var schemaYAML = typed.YAMLObject(`types:
+- name: com.github.openshift.api.config.v1.APIServer
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.APIServerSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.APIServerStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.APIServerEncryption
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.APIServerNamedServingCert
+ map:
+ fields:
+ - name: names
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: servingCertificate
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.APIServerServingCerts
+ map:
+ fields:
+ - name: namedCertificates
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.APIServerNamedServingCert
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.APIServerSpec
+ map:
+ fields:
+ - name: additionalCORSAllowedOrigins
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: audit
+ type:
+ namedType: com.github.openshift.api.config.v1.Audit
+ default: {}
+ - name: clientCA
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: encryption
+ type:
+ namedType: com.github.openshift.api.config.v1.APIServerEncryption
+ default: {}
+ - name: servingCerts
+ type:
+ namedType: com.github.openshift.api.config.v1.APIServerServingCerts
+ default: {}
+ - name: tlsSecurityProfile
+ type:
+ namedType: com.github.openshift.api.config.v1.TLSSecurityProfile
+- name: com.github.openshift.api.config.v1.APIServerStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.AWSDNSSpec
+ map:
+ fields:
+ - name: privateZoneIAMRole
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.AWSIngressSpec
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.AWSPlatformSpec
+ map:
+ fields:
+ - name: serviceEndpoints
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AWSServiceEndpoint
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.AWSPlatformStatus
+ map:
+ fields:
+ - name: region
+ type:
+ scalar: string
+ default: ""
+ - name: resourceTags
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AWSResourceTag
+ elementRelationship: atomic
+ - name: serviceEndpoints
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AWSServiceEndpoint
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.AWSResourceTag
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.AWSServiceEndpoint
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.AlibabaCloudPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.AlibabaCloudPlatformStatus
+ map:
+ fields:
+ - name: region
+ type:
+ scalar: string
+ default: ""
+ - name: resourceGroupID
+ type:
+ scalar: string
+ - name: resourceTags
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AlibabaCloudResourceTag
+ elementRelationship: associative
+ keys:
+ - key
+- name: com.github.openshift.api.config.v1.AlibabaCloudResourceTag
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Audit
+ map:
+ fields:
+ - name: customRules
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AuditCustomRule
+ elementRelationship: associative
+ keys:
+ - group
+ - name: profile
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.AuditCustomRule
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ default: ""
+ - name: profile
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.Authentication
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.AuthenticationSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.AuthenticationStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.AuthenticationSpec
+ map:
+ fields:
+ - name: oauthMetadata
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: oidcProviders
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.OIDCProvider
+ elementRelationship: associative
+ keys:
+ - name
+ - name: serviceAccountIssuer
+ type:
+ scalar: string
+ default: ""
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ - name: webhookTokenAuthenticator
+ type:
+ namedType: com.github.openshift.api.config.v1.WebhookTokenAuthenticator
+ - name: webhookTokenAuthenticators
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.DeprecatedWebhookTokenAuthenticator
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.AuthenticationStatus
+ map:
+ fields:
+ - name: integratedOAuthMetadata
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: oidcClients
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.OIDCClientStatus
+ elementRelationship: associative
+ keys:
+ - componentNamespace
+ - componentName
+- name: com.github.openshift.api.config.v1.AzurePlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.AzurePlatformStatus
+ map:
+ fields:
+ - name: armEndpoint
+ type:
+ scalar: string
+ - name: cloudName
+ type:
+ scalar: string
+ - name: networkResourceGroupName
+ type:
+ scalar: string
+ - name: resourceGroupName
+ type:
+ scalar: string
+ default: ""
+ - name: resourceTags
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.AzureResourceTag
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.AzureResourceTag
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ default: OpenShiftManagedDefault
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.BareMetalPlatformSpec
+ map:
+ fields:
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.BareMetalPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ingressIP
+ type:
+ scalar: string
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer
+ default:
+ type: OpenShiftManagedDefault
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nodeDNSIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.BasicAuthIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: tlsClientCert
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: tlsClientKey
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Build
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.BuildSpec
+ default: {}
+- name: com.github.openshift.api.config.v1.BuildDefaults
+ map:
+ fields:
+ - name: defaultProxy
+ type:
+ namedType: com.github.openshift.api.config.v1.ProxySpec
+ - name: env
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.EnvVar
+ elementRelationship: atomic
+ - name: gitProxy
+ type:
+ namedType: com.github.openshift.api.config.v1.ProxySpec
+ - name: imageLabels
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ImageLabel
+ elementRelationship: atomic
+ - name: resources
+ type:
+ namedType: io.k8s.api.core.v1.ResourceRequirements
+ default: {}
+- name: com.github.openshift.api.config.v1.BuildOverrides
+ map:
+ fields:
+ - name: forcePull
+ type:
+ scalar: boolean
+ - name: imageLabels
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ImageLabel
+ elementRelationship: atomic
+ - name: nodeSelector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: tolerations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Toleration
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.BuildSpec
+ map:
+ fields:
+ - name: additionalTrustedCA
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: buildDefaults
+ type:
+ namedType: com.github.openshift.api.config.v1.BuildDefaults
+ default: {}
+ - name: buildOverrides
+ type:
+ namedType: com.github.openshift.api.config.v1.BuildOverrides
+ default: {}
+- name: com.github.openshift.api.config.v1.CloudControllerManagerStatus
+ map:
+ fields:
+ - name: state
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.CloudLoadBalancerConfig
+ map:
+ fields:
+ - name: clusterHosted
+ type:
+ namedType: com.github.openshift.api.config.v1.CloudLoadBalancerIPs
+ - name: dnsType
+ type:
+ scalar: string
+ default: PlatformDefault
+ unions:
+ - discriminator: dnsType
+ fields:
+ - fieldName: clusterHosted
+ discriminatorValue: ClusterHosted
+- name: com.github.openshift.api.config.v1.CloudLoadBalancerIPs
+ map:
+ fields:
+ - name: apiIntLoadBalancerIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: apiLoadBalancerIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: ingressLoadBalancerIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: com.github.openshift.api.config.v1.ClusterCondition
+ map:
+ fields:
+ - name: promql
+ type:
+ namedType: com.github.openshift.api.config.v1.PromQLClusterCondition
+ - name: type
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ClusterNetworkEntry
+ map:
+ fields:
+ - name: cidr
+ type:
+ scalar: string
+ default: ""
+ - name: hostPrefix
+ type:
+ scalar: numeric
+- name: com.github.openshift.api.config.v1.ClusterOperator
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterOperatorSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterOperatorStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ClusterOperatorSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.ClusterOperatorStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: extension
+ type:
+ namedType: __untyped_atomic_
+ default: {}
+ - name: relatedObjects
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ObjectReference
+ elementRelationship: atomic
+ - name: versions
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.OperandVersion
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition
+ map:
+ fields:
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ default: {}
+ - name: message
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+ default: ""
+ - name: type
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ClusterVersion
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterVersionSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterVersionStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesSpec
+ map:
+ fields:
+ - name: additionalEnabledCapabilities
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: baselineCapabilitySet
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesStatus
+ map:
+ fields:
+ - name: enabledCapabilities
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: knownCapabilities
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ClusterVersionSpec
+ map:
+ fields:
+ - name: capabilities
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesSpec
+ - name: channel
+ type:
+ scalar: string
+ - name: clusterID
+ type:
+ scalar: string
+ default: ""
+ - name: desiredUpdate
+ type:
+ namedType: com.github.openshift.api.config.v1.Update
+ - name: overrides
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ComponentOverride
+ elementRelationship: associative
+ keys:
+ - kind
+ - group
+ - namespace
+ - name
+ - name: signatureStores
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.SignatureStore
+ elementRelationship: associative
+ keys:
+ - url
+ - name: upstream
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ClusterVersionStatus
+ map:
+ fields:
+ - name: availableUpdates
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.Release
+ elementRelationship: atomic
+ - name: capabilities
+ type:
+ namedType: com.github.openshift.api.config.v1.ClusterVersionCapabilitiesStatus
+ default: {}
+ - name: conditionalUpdates
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ConditionalUpdate
+ elementRelationship: atomic
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ClusterOperatorStatusCondition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: desired
+ type:
+ namedType: com.github.openshift.api.config.v1.Release
+ default: {}
+ - name: history
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.UpdateHistory
+ elementRelationship: atomic
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ default: 0
+ - name: versionHash
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ComponentOverride
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ default: ""
+ - name: kind
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: namespace
+ type:
+ scalar: string
+ default: ""
+ - name: unmanaged
+ type:
+ scalar: boolean
+ default: false
+- name: com.github.openshift.api.config.v1.ComponentRouteSpec
+ map:
+ fields:
+ - name: hostname
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: namespace
+ type:
+ scalar: string
+ default: ""
+ - name: servingCertKeyPairSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.ComponentRouteStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: consumingUsers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: currentHostnames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: defaultHostname
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: namespace
+ type:
+ scalar: string
+ default: ""
+ - name: relatedObjects
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ObjectReference
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ConditionalUpdate
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: release
+ type:
+ namedType: com.github.openshift.api.config.v1.Release
+ default: {}
+ - name: risks
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ConditionalUpdateRisk
+ elementRelationship: associative
+ keys:
+ - name
+- name: com.github.openshift.api.config.v1.ConditionalUpdateRisk
+ map:
+ fields:
+ - name: matchingRules
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ClusterCondition
+ elementRelationship: atomic
+ - name: message
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ConfigMapFileReference
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ConfigMapNameReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Console
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ConsoleSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ConsoleStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ConsoleAuthentication
+ map:
+ fields:
+ - name: logoutRedirect
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ConsoleSpec
+ map:
+ fields:
+ - name: authentication
+ type:
+ namedType: com.github.openshift.api.config.v1.ConsoleAuthentication
+ default: {}
+- name: com.github.openshift.api.config.v1.ConsoleStatus
+ map:
+ fields:
+ - name: consoleURL
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.CustomFeatureGates
+ map:
+ fields:
+ - name: disabled
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: enabled
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.CustomTLSProfile
+ map:
+ fields:
+ - name: ciphers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: minTLSVersion
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.DNS
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.DNSSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.DNSStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.DNSPlatformSpec
+ map:
+ fields:
+ - name: aws
+ type:
+ namedType: com.github.openshift.api.config.v1.AWSDNSSpec
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ unions:
+ - discriminator: type
+ fields:
+ - fieldName: aws
+ discriminatorValue: AWS
+- name: com.github.openshift.api.config.v1.DNSSpec
+ map:
+ fields:
+ - name: baseDomain
+ type:
+ scalar: string
+ default: ""
+ - name: platform
+ type:
+ namedType: com.github.openshift.api.config.v1.DNSPlatformSpec
+ default: {}
+ - name: privateZone
+ type:
+ namedType: com.github.openshift.api.config.v1.DNSZone
+ - name: publicZone
+ type:
+ namedType: com.github.openshift.api.config.v1.DNSZone
+- name: com.github.openshift.api.config.v1.DNSStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.DNSZone
+ map:
+ fields:
+ - name: id
+ type:
+ scalar: string
+ - name: tags
+ type:
+ map:
+ elementType:
+ scalar: string
+- name: com.github.openshift.api.config.v1.DeprecatedWebhookTokenAuthenticator
+ map:
+ fields:
+ - name: kubeConfig
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.EquinixMetalPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.EquinixMetalPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: ingressIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ExternalIPConfig
+ map:
+ fields:
+ - name: autoAssignCIDRs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: policy
+ type:
+ namedType: com.github.openshift.api.config.v1.ExternalIPPolicy
+- name: com.github.openshift.api.config.v1.ExternalIPPolicy
+ map:
+ fields:
+ - name: allowedCIDRs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: rejectedCIDRs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ExternalPlatformSpec
+ map:
+ fields:
+ - name: platformName
+ type:
+ scalar: string
+ default: Unknown
+- name: com.github.openshift.api.config.v1.ExternalPlatformStatus
+ map:
+ fields:
+ - name: cloudControllerManager
+ type:
+ namedType: com.github.openshift.api.config.v1.CloudControllerManagerStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.FeatureGate
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.FeatureGateSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.FeatureGateStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.FeatureGateAttributes
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.FeatureGateDetails
+ map:
+ fields:
+ - name: disabled
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.FeatureGateAttributes
+ elementRelationship: atomic
+ - name: enabled
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.FeatureGateAttributes
+ elementRelationship: atomic
+ - name: version
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.FeatureGateSpec
+ map:
+ fields:
+ - name: customNoUpgrade
+ type:
+ namedType: com.github.openshift.api.config.v1.CustomFeatureGates
+ - name: featureSet
+ type:
+ scalar: string
+ unions:
+ - discriminator: featureSet
+ fields:
+ - fieldName: customNoUpgrade
+ discriminatorValue: CustomNoUpgrade
+- name: com.github.openshift.api.config.v1.FeatureGateStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: featureGates
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.FeatureGateDetails
+ elementRelationship: associative
+ keys:
+ - version
+- name: com.github.openshift.api.config.v1.GCPPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.GCPPlatformStatus
+ map:
+ fields:
+ - name: cloudLoadBalancerConfig
+ type:
+ namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig
+ default:
+ dnsType: PlatformDefault
+ - name: projectID
+ type:
+ scalar: string
+ default: ""
+ - name: region
+ type:
+ scalar: string
+ default: ""
+ - name: resourceLabels
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.GCPResourceLabel
+ elementRelationship: associative
+ keys:
+ - key
+ - name: resourceTags
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.GCPResourceTag
+ elementRelationship: associative
+ keys:
+ - key
+- name: com.github.openshift.api.config.v1.GCPResourceLabel
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.GCPResourceTag
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: parentID
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.GitHubIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: clientSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: hostname
+ type:
+ scalar: string
+ default: ""
+ - name: organizations
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: teams
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.GitLabIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: clientSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.GoogleIdentityProvider
+ map:
+ fields:
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: clientSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: hostedDomain
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.HTPasswdIdentityProvider
+ map:
+ fields:
+ - name: fileData
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.HubSource
+ map:
+ fields:
+ - name: disabled
+ type:
+ scalar: boolean
+ default: false
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.HubSourceStatus
+ map:
+ fields:
+ - name: message
+ type:
+ scalar: string
+ - name: status
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.IBMCloudPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.IBMCloudPlatformStatus
+ map:
+ fields:
+ - name: cisInstanceCRN
+ type:
+ scalar: string
+ - name: dnsInstanceCRN
+ type:
+ scalar: string
+ - name: location
+ type:
+ scalar: string
+ - name: providerType
+ type:
+ scalar: string
+ - name: resourceGroupName
+ type:
+ scalar: string
+ - name: serviceEndpoints
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.IBMCloudServiceEndpoint
+ elementRelationship: associative
+ keys:
+ - name
+- name: com.github.openshift.api.config.v1.IBMCloudServiceEndpoint
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.IdentityProvider
+ map:
+ fields:
+ - name: basicAuth
+ type:
+ namedType: com.github.openshift.api.config.v1.BasicAuthIdentityProvider
+ - name: github
+ type:
+ namedType: com.github.openshift.api.config.v1.GitHubIdentityProvider
+ - name: gitlab
+ type:
+ namedType: com.github.openshift.api.config.v1.GitLabIdentityProvider
+ - name: google
+ type:
+ namedType: com.github.openshift.api.config.v1.GoogleIdentityProvider
+ - name: htpasswd
+ type:
+ namedType: com.github.openshift.api.config.v1.HTPasswdIdentityProvider
+ - name: keystone
+ type:
+ namedType: com.github.openshift.api.config.v1.KeystoneIdentityProvider
+ - name: ldap
+ type:
+ namedType: com.github.openshift.api.config.v1.LDAPIdentityProvider
+ - name: mappingMethod
+ type:
+ scalar: string
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: openID
+ type:
+ namedType: com.github.openshift.api.config.v1.OpenIDIdentityProvider
+ - name: requestHeader
+ type:
+ namedType: com.github.openshift.api.config.v1.RequestHeaderIdentityProvider
+ - name: type
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Image
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ImageContentPolicy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageContentPolicySpec
+ default: {}
+- name: com.github.openshift.api.config.v1.ImageContentPolicySpec
+ map:
+ fields:
+ - name: repositoryDigestMirrors
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.RepositoryDigestMirrors
+ elementRelationship: associative
+ keys:
+ - source
+- name: com.github.openshift.api.config.v1.ImageDigestMirrorSet
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageDigestMirrorSetSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageDigestMirrorSetStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ImageDigestMirrorSetSpec
+ map:
+ fields:
+ - name: imageDigestMirrors
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ImageDigestMirrors
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ImageDigestMirrorSetStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.ImageDigestMirrors
+ map:
+ fields:
+ - name: mirrorSourcePolicy
+ type:
+ scalar: string
+ - name: mirrors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: source
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ImageLabel
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ImageSpec
+ map:
+ fields:
+ - name: additionalTrustedCA
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: allowedRegistriesForImport
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.RegistryLocation
+ elementRelationship: atomic
+ - name: externalRegistryHostnames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: registrySources
+ type:
+ namedType: com.github.openshift.api.config.v1.RegistrySources
+ default: {}
+- name: com.github.openshift.api.config.v1.ImageStatus
+ map:
+ fields:
+ - name: externalRegistryHostnames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: internalRegistryHostname
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.ImageTagMirrorSet
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageTagMirrorSetSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ImageTagMirrorSetStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ImageTagMirrorSetSpec
+ map:
+ fields:
+ - name: imageTagMirrors
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ImageTagMirrors
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.ImageTagMirrorSetStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.ImageTagMirrors
+ map:
+ fields:
+ - name: mirrorSourcePolicy
+ type:
+ scalar: string
+ - name: mirrors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: source
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Infrastructure
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.InfrastructureSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.InfrastructureStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.InfrastructureSpec
+ map:
+ fields:
+ - name: cloudConfig
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapFileReference
+ default: {}
+ - name: platformSpec
+ type:
+ namedType: com.github.openshift.api.config.v1.PlatformSpec
+ default: {}
+- name: com.github.openshift.api.config.v1.InfrastructureStatus
+ map:
+ fields:
+ - name: apiServerInternalURI
+ type:
+ scalar: string
+ default: ""
+ - name: apiServerURL
+ type:
+ scalar: string
+ default: ""
+ - name: controlPlaneTopology
+ type:
+ scalar: string
+ default: ""
+ - name: cpuPartitioning
+ type:
+ scalar: string
+ default: None
+ - name: etcdDiscoveryDomain
+ type:
+ scalar: string
+ default: ""
+ - name: infrastructureName
+ type:
+ scalar: string
+ default: ""
+ - name: infrastructureTopology
+ type:
+ scalar: string
+ default: ""
+ - name: platform
+ type:
+ scalar: string
+ - name: platformStatus
+ type:
+ namedType: com.github.openshift.api.config.v1.PlatformStatus
+- name: com.github.openshift.api.config.v1.Ingress
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.IngressSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.IngressStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.IngressPlatformSpec
+ map:
+ fields:
+ - name: aws
+ type:
+ namedType: com.github.openshift.api.config.v1.AWSIngressSpec
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ unions:
+ - discriminator: type
+ fields:
+ - fieldName: aws
+ discriminatorValue: AWS
+- name: com.github.openshift.api.config.v1.IngressSpec
+ map:
+ fields:
+ - name: appsDomain
+ type:
+ scalar: string
+ - name: componentRoutes
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ComponentRouteSpec
+ elementRelationship: associative
+ keys:
+ - namespace
+ - name
+ - name: domain
+ type:
+ scalar: string
+ default: ""
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.LoadBalancer
+ default: {}
+ - name: requiredHSTSPolicies
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.RequiredHSTSPolicy
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.IngressStatus
+ map:
+ fields:
+ - name: componentRoutes
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ComponentRouteStatus
+ elementRelationship: associative
+ keys:
+ - namespace
+ - name
+ - name: defaultPlacement
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.IntermediateTLSProfile
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.KeystoneIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: domainName
+ type:
+ scalar: string
+ default: ""
+ - name: tlsClientCert
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: tlsClientKey
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.KubevirtPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.KubevirtPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: ingressIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.LDAPAttributeMapping
+ map:
+ fields:
+ - name: email
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: id
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: name
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: preferredUsername
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.LDAPIdentityProvider
+ map:
+ fields:
+ - name: attributes
+ type:
+ namedType: com.github.openshift.api.config.v1.LDAPAttributeMapping
+ default: {}
+ - name: bindDN
+ type:
+ scalar: string
+ default: ""
+ - name: bindPassword
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: insecure
+ type:
+ scalar: boolean
+ default: false
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.LoadBalancer
+ map:
+ fields:
+ - name: platform
+ type:
+ namedType: com.github.openshift.api.config.v1.IngressPlatformSpec
+ default: {}
+- name: com.github.openshift.api.config.v1.MTUMigration
+ map:
+ fields:
+ - name: machine
+ type:
+ namedType: com.github.openshift.api.config.v1.MTUMigrationValues
+ - name: network
+ type:
+ namedType: com.github.openshift.api.config.v1.MTUMigrationValues
+- name: com.github.openshift.api.config.v1.MTUMigrationValues
+ map:
+ fields:
+ - name: from
+ type:
+ scalar: numeric
+ - name: to
+ type:
+ scalar: numeric
+- name: com.github.openshift.api.config.v1.MaxAgePolicy
+ map:
+ fields:
+ - name: largestMaxAge
+ type:
+ scalar: numeric
+ - name: smallestMaxAge
+ type:
+ scalar: numeric
+- name: com.github.openshift.api.config.v1.ModernTLSProfile
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.Network
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.NetworkDiagnostics
+ map:
+ fields:
+ - name: mode
+ type:
+ scalar: string
+ default: ""
+ - name: sourcePlacement
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement
+ default: {}
+ - name: targetPlacement
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement
+ default: {}
+- name: com.github.openshift.api.config.v1.NetworkDiagnosticsSourcePlacement
+ map:
+ fields:
+ - name: nodeSelector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: tolerations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Toleration
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.NetworkDiagnosticsTargetPlacement
+ map:
+ fields:
+ - name: nodeSelector
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: tolerations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.Toleration
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.NetworkMigration
+ map:
+ fields:
+ - name: mtu
+ type:
+ namedType: com.github.openshift.api.config.v1.MTUMigration
+ - name: networkType
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.NetworkSpec
+ map:
+ fields:
+ - name: clusterNetwork
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ClusterNetworkEntry
+ elementRelationship: atomic
+ - name: externalIP
+ type:
+ namedType: com.github.openshift.api.config.v1.ExternalIPConfig
+ - name: networkDiagnostics
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkDiagnostics
+ default: {}
+ - name: networkType
+ type:
+ scalar: string
+ default: ""
+ - name: serviceNetwork
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: serviceNodePortRange
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.NetworkStatus
+ map:
+ fields:
+ - name: clusterNetwork
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.ClusterNetworkEntry
+ elementRelationship: atomic
+ - name: clusterNetworkMTU
+ type:
+ scalar: numeric
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: migration
+ type:
+ namedType: com.github.openshift.api.config.v1.NetworkMigration
+ - name: networkType
+ type:
+ scalar: string
+ - name: serviceNetwork
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.Node
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.NodeSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.NodeStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.NodeSpec
+ map:
+ fields:
+ - name: cgroupMode
+ type:
+ scalar: string
+ - name: workerLatencyProfile
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.NodeStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.NutanixFailureDomain
+ map:
+ fields:
+ - name: cluster
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier
+ default: {}
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: subnets
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier
+ elementRelationship: associative
+ keys:
+ - type
+- name: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ default: OpenShiftManagedDefault
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.NutanixPlatformSpec
+ map:
+ fields:
+ - name: failureDomains
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.NutanixFailureDomain
+ elementRelationship: associative
+ keys:
+ - name
+ - name: prismCentral
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixPrismEndpoint
+ default: {}
+ - name: prismElements
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.NutanixPrismElementEndpoint
+ elementRelationship: associative
+ keys:
+ - name
+- name: com.github.openshift.api.config.v1.NutanixPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: ingressIP
+ type:
+ scalar: string
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer
+ default:
+ type: OpenShiftManagedDefault
+- name: com.github.openshift.api.config.v1.NutanixPrismElementEndpoint
+ map:
+ fields:
+ - name: endpoint
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixPrismEndpoint
+ default: {}
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.NutanixPrismEndpoint
+ map:
+ fields:
+ - name: address
+ type:
+ scalar: string
+ default: ""
+ - name: port
+ type:
+ scalar: numeric
+ default: 0
+- name: com.github.openshift.api.config.v1.NutanixResourceIdentifier
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ - name: uuid
+ type:
+ scalar: string
+ unions:
+ - discriminator: type
+ fields:
+ - fieldName: name
+ discriminatorValue: Name
+ - fieldName: uuid
+ discriminatorValue: UUID
+- name: com.github.openshift.api.config.v1.OAuth
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.OAuthSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.OAuthStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.OAuthSpec
+ map:
+ fields:
+ - name: identityProviders
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.IdentityProvider
+ elementRelationship: atomic
+ - name: templates
+ type:
+ namedType: com.github.openshift.api.config.v1.OAuthTemplates
+ default: {}
+ - name: tokenConfig
+ type:
+ namedType: com.github.openshift.api.config.v1.TokenConfig
+ default: {}
+- name: com.github.openshift.api.config.v1.OAuthStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.OAuthTemplates
+ map:
+ fields:
+ - name: error
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: login
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: providerSelection
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.OIDCClientConfig
+ map:
+ fields:
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: clientSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: componentName
+ type:
+ scalar: string
+ default: ""
+ - name: componentNamespace
+ type:
+ scalar: string
+ default: ""
+ - name: extraScopes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: com.github.openshift.api.config.v1.OIDCClientReference
+ map:
+ fields:
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: issuerURL
+ type:
+ scalar: string
+ default: ""
+ - name: oidcProviderName
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.OIDCClientStatus
+ map:
+ fields:
+ - name: componentName
+ type:
+ scalar: string
+ default: ""
+ - name: componentNamespace
+ type:
+ scalar: string
+ default: ""
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: consumingUsers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: currentOIDCClients
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.OIDCClientReference
+ elementRelationship: associative
+ keys:
+ - issuerURL
+ - clientID
+- name: com.github.openshift.api.config.v1.OIDCProvider
+ map:
+ fields:
+ - name: claimMappings
+ type:
+ namedType: com.github.openshift.api.config.v1.TokenClaimMappings
+ default: {}
+ - name: claimValidationRules
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.TokenClaimValidationRule
+ elementRelationship: atomic
+ - name: issuer
+ type:
+ namedType: com.github.openshift.api.config.v1.TokenIssuer
+ default: {}
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: oidcClients
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.OIDCClientConfig
+ elementRelationship: associative
+ keys:
+ - componentNamespace
+ - componentName
+- name: com.github.openshift.api.config.v1.ObjectReference
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: namespace
+ type:
+ scalar: string
+ - name: resource
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.OldTLSProfile
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.OpenIDClaims
+ map:
+ fields:
+ - name: email
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: groups
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: name
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: preferredUsername
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.OpenIDIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: claims
+ type:
+ namedType: com.github.openshift.api.config.v1.OpenIDClaims
+ default: {}
+ - name: clientID
+ type:
+ scalar: string
+ default: ""
+ - name: clientSecret
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+ - name: extraAuthorizeParameters
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: extraScopes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: issuer
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ default: OpenShiftManagedDefault
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.OpenStackPlatformSpec
+ map:
+ fields:
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.OpenStackPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: cloudName
+ type:
+ scalar: string
+ - name: ingressIP
+ type:
+ scalar: string
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer
+ default:
+ type: OpenShiftManagedDefault
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nodeDNSIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.OperandVersion
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: version
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.OperatorHub
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.OperatorHubSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.OperatorHubStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.OperatorHubSpec
+ map:
+ fields:
+ - name: disableAllDefaultSources
+ type:
+ scalar: boolean
+ - name: sources
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.HubSource
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.OperatorHubStatus
+ map:
+ fields:
+ - name: sources
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.HubSourceStatus
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ default: OpenShiftManagedDefault
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.OvirtPlatformSpec
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.OvirtPlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: ingressIP
+ type:
+ scalar: string
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer
+ default:
+ type: OpenShiftManagedDefault
+ - name: nodeDNSIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.PlatformSpec
+ map:
+ fields:
+ - name: alibabaCloud
+ type:
+ namedType: com.github.openshift.api.config.v1.AlibabaCloudPlatformSpec
+ - name: aws
+ type:
+ namedType: com.github.openshift.api.config.v1.AWSPlatformSpec
+ - name: azure
+ type:
+ namedType: com.github.openshift.api.config.v1.AzurePlatformSpec
+ - name: baremetal
+ type:
+ namedType: com.github.openshift.api.config.v1.BareMetalPlatformSpec
+ - name: equinixMetal
+ type:
+ namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformSpec
+ - name: external
+ type:
+ namedType: com.github.openshift.api.config.v1.ExternalPlatformSpec
+ - name: gcp
+ type:
+ namedType: com.github.openshift.api.config.v1.GCPPlatformSpec
+ - name: ibmcloud
+ type:
+ namedType: com.github.openshift.api.config.v1.IBMCloudPlatformSpec
+ - name: kubevirt
+ type:
+ namedType: com.github.openshift.api.config.v1.KubevirtPlatformSpec
+ - name: nutanix
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixPlatformSpec
+ - name: openstack
+ type:
+ namedType: com.github.openshift.api.config.v1.OpenStackPlatformSpec
+ - name: ovirt
+ type:
+ namedType: com.github.openshift.api.config.v1.OvirtPlatformSpec
+ - name: powervs
+ type:
+ namedType: com.github.openshift.api.config.v1.PowerVSPlatformSpec
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ - name: vsphere
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformSpec
+- name: com.github.openshift.api.config.v1.PlatformStatus
+ map:
+ fields:
+ - name: alibabaCloud
+ type:
+ namedType: com.github.openshift.api.config.v1.AlibabaCloudPlatformStatus
+ - name: aws
+ type:
+ namedType: com.github.openshift.api.config.v1.AWSPlatformStatus
+ - name: azure
+ type:
+ namedType: com.github.openshift.api.config.v1.AzurePlatformStatus
+ - name: baremetal
+ type:
+ namedType: com.github.openshift.api.config.v1.BareMetalPlatformStatus
+ - name: equinixMetal
+ type:
+ namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformStatus
+ - name: external
+ type:
+ namedType: com.github.openshift.api.config.v1.ExternalPlatformStatus
+ - name: gcp
+ type:
+ namedType: com.github.openshift.api.config.v1.GCPPlatformStatus
+ - name: ibmcloud
+ type:
+ namedType: com.github.openshift.api.config.v1.IBMCloudPlatformStatus
+ - name: kubevirt
+ type:
+ namedType: com.github.openshift.api.config.v1.KubevirtPlatformStatus
+ - name: nutanix
+ type:
+ namedType: com.github.openshift.api.config.v1.NutanixPlatformStatus
+ - name: openstack
+ type:
+ namedType: com.github.openshift.api.config.v1.OpenStackPlatformStatus
+ - name: ovirt
+ type:
+ namedType: com.github.openshift.api.config.v1.OvirtPlatformStatus
+ - name: powervs
+ type:
+ namedType: com.github.openshift.api.config.v1.PowerVSPlatformStatus
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ - name: vsphere
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformStatus
+- name: com.github.openshift.api.config.v1.PowerVSPlatformSpec
+ map:
+ fields:
+ - name: serviceEndpoints
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.PowerVSServiceEndpoint
+ elementRelationship: associative
+ keys:
+ - name
+- name: com.github.openshift.api.config.v1.PowerVSPlatformStatus
+ map:
+ fields:
+ - name: cisInstanceCRN
+ type:
+ scalar: string
+ - name: dnsInstanceCRN
+ type:
+ scalar: string
+ - name: region
+ type:
+ scalar: string
+ default: ""
+ - name: resourceGroup
+ type:
+ scalar: string
+ default: ""
+ - name: serviceEndpoints
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.PowerVSServiceEndpoint
+ elementRelationship: associative
+ keys:
+ - name
+ - name: zone
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.PowerVSServiceEndpoint
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.PrefixedClaimMapping
+ map:
+ fields:
+ - name: claim
+ type:
+ scalar: string
+ default: ""
+ - name: prefix
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.ProfileCustomizations
+ map:
+ fields:
+ - name: dynamicResourceAllocation
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Project
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ProjectSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ProjectStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ProjectSpec
+ map:
+ fields:
+ - name: projectRequestMessage
+ type:
+ scalar: string
+ default: ""
+ - name: projectRequestTemplate
+ type:
+ namedType: com.github.openshift.api.config.v1.TemplateReference
+ default: {}
+- name: com.github.openshift.api.config.v1.ProjectStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.PromQLClusterCondition
+ map:
+ fields:
+ - name: promql
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Proxy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.ProxySpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.ProxyStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.ProxySpec
+ map:
+ fields:
+ - name: httpProxy
+ type:
+ scalar: string
+ - name: httpsProxy
+ type:
+ scalar: string
+ - name: noProxy
+ type:
+ scalar: string
+ - name: readinessEndpoints
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: trustedCA
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1.ProxyStatus
+ map:
+ fields:
+ - name: httpProxy
+ type:
+ scalar: string
+ - name: httpsProxy
+ type:
+ scalar: string
+ - name: noProxy
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.RegistryLocation
+ map:
+ fields:
+ - name: domainName
+ type:
+ scalar: string
+ default: ""
+ - name: insecure
+ type:
+ scalar: boolean
+- name: com.github.openshift.api.config.v1.RegistrySources
+ map:
+ fields:
+ - name: allowedRegistries
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: blockedRegistries
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: containerRuntimeSearchRegistries
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: insecureRegistries
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.Release
+ map:
+ fields:
+ - name: channels
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: image
+ type:
+ scalar: string
+ default: ""
+ - name: url
+ type:
+ scalar: string
+ - name: version
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.RepositoryDigestMirrors
+ map:
+ fields:
+ - name: allowMirrorByTags
+ type:
+ scalar: boolean
+ - name: mirrors
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: source
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.RequestHeaderIdentityProvider
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: challengeURL
+ type:
+ scalar: string
+ default: ""
+ - name: clientCommonNames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: emailHeaders
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: headers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: loginURL
+ type:
+ scalar: string
+ default: ""
+ - name: nameHeaders
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: preferredUsernameHeaders
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.RequiredHSTSPolicy
+ map:
+ fields:
+ - name: domainPatterns
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: includeSubDomainsPolicy
+ type:
+ scalar: string
+ - name: maxAge
+ type:
+ namedType: com.github.openshift.api.config.v1.MaxAgePolicy
+ default: {}
+ - name: namespaceSelector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: preloadPolicy
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.Scheduler
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1.SchedulerSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1.SchedulerStatus
+ default: {}
+- name: com.github.openshift.api.config.v1.SchedulerSpec
+ map:
+ fields:
+ - name: defaultNodeSelector
+ type:
+ scalar: string
+ - name: mastersSchedulable
+ type:
+ scalar: boolean
+ default: false
+ - name: policy
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: profile
+ type:
+ scalar: string
+ - name: profileCustomizations
+ type:
+ namedType: com.github.openshift.api.config.v1.ProfileCustomizations
+ default: {}
+- name: com.github.openshift.api.config.v1.SchedulerStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1.SecretNameReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.SignatureStore
+ map:
+ fields:
+ - name: ca
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: url
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.TLSSecurityProfile
+ map:
+ fields:
+ - name: custom
+ type:
+ namedType: com.github.openshift.api.config.v1.CustomTLSProfile
+ - name: intermediate
+ type:
+ namedType: com.github.openshift.api.config.v1.IntermediateTLSProfile
+ - name: modern
+ type:
+ namedType: com.github.openshift.api.config.v1.ModernTLSProfile
+ - name: old
+ type:
+ namedType: com.github.openshift.api.config.v1.OldTLSProfile
+ - name: type
+ type:
+ scalar: string
+ default: ""
+ unions:
+ - discriminator: type
+ fields:
+ - fieldName: custom
+ discriminatorValue: Custom
+ - fieldName: intermediate
+ discriminatorValue: Intermediate
+ - fieldName: modern
+ discriminatorValue: Modern
+ - fieldName: old
+ discriminatorValue: Old
+- name: com.github.openshift.api.config.v1.TemplateReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.TokenClaimMappings
+ map:
+ fields:
+ - name: groups
+ type:
+ namedType: com.github.openshift.api.config.v1.PrefixedClaimMapping
+ default: {}
+ - name: username
+ type:
+ namedType: com.github.openshift.api.config.v1.UsernameClaimMapping
+ default: {}
+- name: com.github.openshift.api.config.v1.TokenClaimValidationRule
+ map:
+ fields:
+ - name: requiredClaim
+ type:
+ namedType: com.github.openshift.api.config.v1.TokenRequiredClaim
+ - name: type
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.TokenConfig
+ map:
+ fields:
+ - name: accessTokenInactivityTimeout
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Duration
+ - name: accessTokenInactivityTimeoutSeconds
+ type:
+ scalar: numeric
+ - name: accessTokenMaxAgeSeconds
+ type:
+ scalar: numeric
+- name: com.github.openshift.api.config.v1.TokenIssuer
+ map:
+ fields:
+ - name: audiences
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: issuerCertificateAuthority
+ type:
+ namedType: com.github.openshift.api.config.v1.ConfigMapNameReference
+ default: {}
+ - name: issuerURL
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.TokenRequiredClaim
+ map:
+ fields:
+ - name: claim
+ type:
+ scalar: string
+ default: ""
+ - name: requiredValue
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.Update
+ map:
+ fields:
+ - name: architecture
+ type:
+ scalar: string
+ default: ""
+ - name: force
+ type:
+ scalar: boolean
+ default: false
+ - name: image
+ type:
+ scalar: string
+ default: ""
+ - name: version
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.UpdateHistory
+ map:
+ fields:
+ - name: acceptedRisks
+ type:
+ scalar: string
+ - name: completionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: image
+ type:
+ scalar: string
+ default: ""
+ - name: startedTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ default: {}
+ - name: state
+ type:
+ scalar: string
+ default: ""
+ - name: verified
+ type:
+ scalar: boolean
+ default: false
+ - name: version
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.UsernameClaimMapping
+ map:
+ fields:
+ - name: claim
+ type:
+ scalar: string
+ default: ""
+ - name: prefix
+ type:
+ namedType: com.github.openshift.api.config.v1.UsernamePrefix
+ - name: prefixPolicy
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.UsernamePrefix
+ map:
+ fields:
+ - name: prefixString
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: region
+ type:
+ scalar: string
+ default: ""
+ - name: server
+ type:
+ scalar: string
+ default: ""
+ - name: topology
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformTopology
+ default: {}
+ - name: zone
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer
+ map:
+ fields:
+ - name: type
+ type:
+ scalar: string
+ default: OpenShiftManagedDefault
+ unions:
+ - discriminator: type
+- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking
+ map:
+ fields:
+ - name: external
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec
+ default: {}
+ - name: internal
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec
+ default: {}
+- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec
+ map:
+ fields:
+ - name: excludeNetworkSubnetCidr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: network
+ type:
+ scalar: string
+ - name: networkSubnetCidr
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: com.github.openshift.api.config.v1.VSpherePlatformSpec
+ map:
+ fields:
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: failureDomains
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec
+ elementRelationship: associative
+ keys:
+ - name
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nodeNetworking
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking
+ default: {}
+ - name: vcenters
+ type:
+ list:
+ elementType:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1.VSpherePlatformStatus
+ map:
+ fields:
+ - name: apiServerInternalIP
+ type:
+ scalar: string
+ - name: apiServerInternalIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: ingressIP
+ type:
+ scalar: string
+ - name: ingressIPs
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: loadBalancer
+ type:
+ namedType: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer
+ default:
+ type: OpenShiftManagedDefault
+ - name: machineNetworks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: nodeDNSIP
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.VSpherePlatformTopology
+ map:
+ fields:
+ - name: computeCluster
+ type:
+ scalar: string
+ default: ""
+ - name: datacenter
+ type:
+ scalar: string
+ default: ""
+ - name: datastore
+ type:
+ scalar: string
+ default: ""
+ - name: folder
+ type:
+ scalar: string
+ - name: networks
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: resourcePool
+ type:
+ scalar: string
+ - name: template
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec
+ map:
+ fields:
+ - name: datacenters
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: port
+ type:
+ scalar: numeric
+ - name: server
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1.WebhookTokenAuthenticator
+ map:
+ fields:
+ - name: kubeConfig
+ type:
+ namedType: com.github.openshift.api.config.v1.SecretNameReference
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.Backup
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.BackupSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.BackupStatus
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.BackupSpec
+ map:
+ fields:
+ - name: etcd
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.BackupStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.ClusterImagePolicySpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.ClusterImagePolicyStatus
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicySpec
+ map:
+ fields:
+ - name: policy
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.Policy
+ default: {}
+ - name: scopes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: com.github.openshift.api.config.v1alpha1.ClusterImagePolicyStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+- name: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec
+ map:
+ fields:
+ - name: pvcName
+ type:
+ scalar: string
+ default: ""
+ - name: retentionPolicy
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.RetentionPolicy
+ default: {}
+ - name: schedule
+ type:
+ scalar: string
+ default: ""
+ - name: timeZone
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor
+ map:
+ fields:
+ - name: fulcioCAData
+ type:
+ scalar: string
+ - name: fulcioSubject
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject
+ default: {}
+ - name: rekorKeyData
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1alpha1.GatherConfig
+ map:
+ fields:
+ - name: dataPolicy
+ type:
+ scalar: string
+ - name: disabledGatherers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: com.github.openshift.api.config.v1alpha1.ImagePolicy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.ImagePolicySpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.ImagePolicySpec
+ map:
+ fields:
+ - name: policy
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.Policy
+ default: {}
+ - name: scopes
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: com.github.openshift.api.config.v1alpha1.ImagePolicyStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+- name: com.github.openshift.api.config.v1alpha1.InsightsDataGather
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec
+ default: {}
+ - name: status
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec
+ map:
+ fields:
+ - name: gatherConfig
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.GatherConfig
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: com.github.openshift.api.config.v1alpha1.Policy
+ map:
+ fields:
+ - name: rootOfTrust
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust
+ default: {}
+ - name: signedIdentity
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PolicyIdentity
+ default: {}
+- name: com.github.openshift.api.config.v1alpha1.PolicyFulcioSubject
+ map:
+ fields:
+ - name: oidcIssuer
+ type:
+ scalar: string
+ default: ""
+ - name: signedEmail
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1alpha1.PolicyIdentity
+ map:
+ fields:
+ - name: exactRepository
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PolicyMatchExactRepository
+ - name: matchPolicy
+ type:
+ scalar: string
+ default: ""
+ - name: remapIdentity
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PolicyMatchRemapIdentity
+ unions:
+ - discriminator: matchPolicy
+ fields:
+ - fieldName: exactRepository
+ discriminatorValue: PolicyMatchExactRepository
+ - fieldName: remapIdentity
+ discriminatorValue: PolicyMatchRemapIdentity
+- name: com.github.openshift.api.config.v1alpha1.PolicyMatchExactRepository
+ map:
+ fields:
+ - name: repository
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1alpha1.PolicyMatchRemapIdentity
+ map:
+ fields:
+ - name: prefix
+ type:
+ scalar: string
+ default: ""
+ - name: signedPrefix
+ type:
+ scalar: string
+ default: ""
+- name: com.github.openshift.api.config.v1alpha1.PolicyRootOfTrust
+ map:
+ fields:
+ - name: fulcioCAWithRekor
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.FulcioCAWithRekor
+ - name: policyType
+ type:
+ scalar: string
+ default: ""
+ - name: publicKey
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.PublicKey
+ unions:
+ - discriminator: policyType
+ fields:
+ - fieldName: fulcioCAWithRekor
+ discriminatorValue: FulcioCAWithRekor
+ - fieldName: publicKey
+ discriminatorValue: PublicKey
+- name: com.github.openshift.api.config.v1alpha1.PublicKey
+ map:
+ fields:
+ - name: keyData
+ type:
+ scalar: string
+ - name: rekorKeyData
+ type:
+ scalar: string
+- name: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig
+ map:
+ fields:
+ - name: maxNumberOfBackups
+ type:
+ scalar: numeric
+- name: com.github.openshift.api.config.v1alpha1.RetentionPolicy
+ map:
+ fields:
+ - name: retentionNumber
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.RetentionNumberConfig
+ - name: retentionSize
+ type:
+ namedType: com.github.openshift.api.config.v1alpha1.RetentionSizeConfig
+ - name: retentionType
+ type:
+ scalar: string
+ default: ""
+ unions:
+ - discriminator: retentionType
+ fields:
+ - fieldName: retentionNumber
+ discriminatorValue: RetentionNumber
+ - fieldName: retentionSize
+ discriminatorValue: RetentionSize
+- name: com.github.openshift.api.config.v1alpha1.RetentionSizeConfig
+ map:
+ fields:
+ - name: maxSizeOfBackupsGb
+ type:
+ scalar: numeric
+- name: io.k8s.api.core.v1.ConfigMapKeySelector
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.EnvVar
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: value
+ type:
+ scalar: string
+ - name: valueFrom
+ type:
+ namedType: io.k8s.api.core.v1.EnvVarSource
+- name: io.k8s.api.core.v1.EnvVarSource
+ map:
+ fields:
+ - name: configMapKeyRef
+ type:
+ namedType: io.k8s.api.core.v1.ConfigMapKeySelector
+ - name: fieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ObjectFieldSelector
+ - name: resourceFieldRef
+ type:
+ namedType: io.k8s.api.core.v1.ResourceFieldSelector
+ - name: secretKeyRef
+ type:
+ namedType: io.k8s.api.core.v1.SecretKeySelector
+- name: io.k8s.api.core.v1.ObjectFieldSelector
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: fieldPath
+ type:
+ scalar: string
+ default: ""
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.ResourceClaim
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.core.v1.ResourceFieldSelector
+ map:
+ fields:
+ - name: containerName
+ type:
+ scalar: string
+ - name: divisor
+ type:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ default: {}
+ - name: resource
+ type:
+ scalar: string
+ default: ""
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.ResourceRequirements
+ map:
+ fields:
+ - name: claims
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.ResourceClaim
+ elementRelationship: associative
+ keys:
+ - name
+ - name: limits
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+ - name: requests
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
+- name: io.k8s.api.core.v1.SecretKeySelector
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ - name: optional
+ type:
+ scalar: boolean
+ elementRelationship: atomic
+- name: io.k8s.api.core.v1.Toleration
+ map:
+ fields:
+ - name: effect
+ type:
+ scalar: string
+ - name: key
+ type:
+ scalar: string
+ - name: operator
+ type:
+ scalar: string
+ - name: tolerationSeconds
+ type:
+ scalar: numeric
+ - name: value
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.api.resource.Quantity
+ scalar: untyped
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ map:
+ fields:
+ - name: lastTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ default: {}
+ - name: message
+ type:
+ scalar: string
+ default: ""
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ - name: reason
+ type:
+ scalar: string
+ default: ""
+ - name: status
+ type:
+ scalar: string
+ default: ""
+ - name: type
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Duration
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ map:
+ fields:
+ - name: matchExpressions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement
+ elementRelationship: atomic
+ - name: matchLabels
+ type:
+ map:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: operator
+ type:
+ scalar: string
+ default: ""
+ - name: values
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: fieldsType
+ type:
+ scalar: string
+ - name: fieldsV1
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1
+ - name: manager
+ type:
+ scalar: string
+ - name: operation
+ type:
+ scalar: string
+ - name: subresource
+ type:
+ scalar: string
+ - name: time
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ map:
+ fields:
+ - name: annotations
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: creationTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ default: {}
+ - name: deletionGracePeriodSeconds
+ type:
+ scalar: numeric
+ - name: deletionTimestamp
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: finalizers
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: generateName
+ type:
+ scalar: string
+ - name: generation
+ type:
+ scalar: numeric
+ - name: labels
+ type:
+ map:
+ elementType:
+ scalar: string
+ - name: managedFields
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry
+ elementRelationship: atomic
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: ownerReferences
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
+ elementRelationship: associative
+ keys:
+ - uid
+ - name: resourceVersion
+ type:
+ scalar: string
+ - name: selfLink
+ type:
+ scalar: string
+ - name: uid
+ type:
+ scalar: string
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ default: ""
+ - name: blockOwnerDeletion
+ type:
+ scalar: boolean
+ - name: controller
+ type:
+ scalar: boolean
+ - name: kind
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: uid
+ type:
+ scalar: string
+ default: ""
+ elementRelationship: atomic
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ scalar: untyped
+- name: io.k8s.apimachinery.pkg.runtime.RawExtension
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: __untyped_atomic_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+- name: __untyped_deduced_
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+`)
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
new file mode 100644
index 0000000000..29896542d0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
@@ -0,0 +1,117 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+ "net/http"
+
+ configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+ configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ ConfigV1() configv1.ConfigV1Interface
+ ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface
+}
+
+// Clientset contains the clients for groups.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ configV1 *configv1.ConfigV1Client
+ configV1alpha1 *configv1alpha1.ConfigV1alpha1Client
+}
+
+// ConfigV1 retrieves the ConfigV1Client
+func (c *Clientset) ConfigV1() configv1.ConfigV1Interface {
+ return c.configV1
+}
+
+// ConfigV1alpha1 retrieves the ConfigV1alpha1Client
+func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface {
+ return c.configV1alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ // share the transport between all clients
+ httpClient, err := rest.HTTPClientFor(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewForConfigAndClient(&configShallowCopy, httpClient)
+}
+
+// NewForConfigAndClient creates a new Clientset for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+
+ var cs Clientset
+ var err error
+ cs.configV1, err = configv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.configV1alpha1, err = configv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ cs, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.configV1 = configv1.New(c)
+ cs.configV1alpha1 = configv1alpha1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go
new file mode 100644
index 0000000000..14db57a58f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go
new file mode 100644
index 0000000000..6340555dd1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go
@@ -0,0 +1,42 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ configv1.AddToScheme,
+ configv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
new file mode 100644
index 0000000000..d4fff3f957
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/apiserver.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// APIServersGetter has a method to return a APIServerInterface.
+// A group's client should implement this interface.
+type APIServersGetter interface {
+ APIServers() APIServerInterface
+}
+
+// APIServerInterface has methods to work with APIServer resources.
+type APIServerInterface interface {
+ Create(ctx context.Context, aPIServer *v1.APIServer, opts metav1.CreateOptions) (*v1.APIServer, error)
+ Update(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error)
+ UpdateStatus(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (*v1.APIServer, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.APIServer, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.APIServerList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIServer, err error)
+ Apply(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error)
+ ApplyStatus(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error)
+ APIServerExpansion
+}
+
+// aPIServers implements APIServerInterface
+type aPIServers struct {
+ client rest.Interface
+}
+
+// newAPIServers returns a APIServers
+func newAPIServers(c *ConfigV1Client) *aPIServers {
+ return &aPIServers{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the aPIServer, and returns the corresponding aPIServer object, and an error if there is any.
+func (c *aPIServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIServer, err error) {
+ result = &v1.APIServer{}
+ err = c.client.Get().
+ Resource("apiservers").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of APIServers that match those selectors.
+func (c *aPIServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServerList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.APIServerList{}
+ err = c.client.Get().
+ Resource("apiservers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested aPIServers.
+func (c *aPIServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("apiservers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a aPIServer and creates it. Returns the server's representation of the aPIServer, and an error, if there is any.
+func (c *aPIServers) Create(ctx context.Context, aPIServer *v1.APIServer, opts metav1.CreateOptions) (result *v1.APIServer, err error) {
+ result = &v1.APIServer{}
+ err = c.client.Post().
+ Resource("apiservers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(aPIServer).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a aPIServer and updates it. Returns the server's representation of the aPIServer, and an error, if there is any.
+func (c *aPIServers) Update(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (result *v1.APIServer, err error) {
+ result = &v1.APIServer{}
+ err = c.client.Put().
+ Resource("apiservers").
+ Name(aPIServer.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(aPIServer).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *aPIServers) UpdateStatus(ctx context.Context, aPIServer *v1.APIServer, opts metav1.UpdateOptions) (result *v1.APIServer, err error) {
+ result = &v1.APIServer{}
+ err = c.client.Put().
+ Resource("apiservers").
+ Name(aPIServer.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(aPIServer).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the aPIServer and deletes it. Returns an error if one occurs.
+func (c *aPIServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("apiservers").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *aPIServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("apiservers").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched aPIServer.
+func (c *aPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIServer, err error) {
+ result = &v1.APIServer{}
+ err = c.client.Patch(pt).
+ Resource("apiservers").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied aPIServer.
+func (c *aPIServers) Apply(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) {
+ if aPIServer == nil {
+ return nil, fmt.Errorf("aPIServer provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(aPIServer)
+ if err != nil {
+ return nil, err
+ }
+ name := aPIServer.Name
+ if name == nil {
+ return nil, fmt.Errorf("aPIServer.Name must be provided to Apply")
+ }
+ result = &v1.APIServer{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("apiservers").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *aPIServers) ApplyStatus(ctx context.Context, aPIServer *configv1.APIServerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.APIServer, err error) {
+ if aPIServer == nil {
+ return nil, fmt.Errorf("aPIServer provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(aPIServer)
+ if err != nil {
+ return nil, err
+ }
+
+ name := aPIServer.Name
+ if name == nil {
+ return nil, fmt.Errorf("aPIServer.Name must be provided to Apply")
+ }
+
+ result = &v1.APIServer{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("apiservers").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go
new file mode 100644
index 0000000000..91c5d99d2d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/authentication.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// AuthenticationsGetter has a method to return a AuthenticationInterface.
+// A group's client should implement this interface.
+type AuthenticationsGetter interface {
+ Authentications() AuthenticationInterface
+}
+
+// AuthenticationInterface has methods to work with Authentication resources.
+type AuthenticationInterface interface {
+ Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (*v1.Authentication, error)
+ Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error)
+ UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Authentication, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.AuthenticationList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error)
+ Apply(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error)
+ ApplyStatus(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error)
+ AuthenticationExpansion
+}
+
+// authentications implements AuthenticationInterface
+type authentications struct {
+ client rest.Interface
+}
+
+// newAuthentications returns a Authentications
+func newAuthentications(c *ConfigV1Client) *authentications {
+ return &authentications{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the authentication, and returns the corresponding authentication object, and an error if there is any.
+func (c *authentications) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Authentication, err error) {
+ result = &v1.Authentication{}
+ err = c.client.Get().
+ Resource("authentications").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Authentications that match those selectors.
+func (c *authentications) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AuthenticationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.AuthenticationList{}
+ err = c.client.Get().
+ Resource("authentications").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested authentications.
+func (c *authentications) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("authentications").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a authentication and creates it. Returns the server's representation of the authentication, and an error, if there is any.
+func (c *authentications) Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (result *v1.Authentication, err error) {
+ result = &v1.Authentication{}
+ err = c.client.Post().
+ Resource("authentications").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(authentication).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a authentication and updates it. Returns the server's representation of the authentication, and an error, if there is any.
+func (c *authentications) Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) {
+ result = &v1.Authentication{}
+ err = c.client.Put().
+ Resource("authentications").
+ Name(authentication.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(authentication).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *authentications) UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) {
+ result = &v1.Authentication{}
+ err = c.client.Put().
+ Resource("authentications").
+ Name(authentication.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(authentication).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the authentication and deletes it. Returns an error if one occurs.
+func (c *authentications) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("authentications").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *authentications) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("authentications").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched authentication.
+func (c *authentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error) {
+ result = &v1.Authentication{}
+ err = c.client.Patch(pt).
+ Resource("authentications").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied authentication.
+func (c *authentications) Apply(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) {
+ if authentication == nil {
+ return nil, fmt.Errorf("authentication provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(authentication)
+ if err != nil {
+ return nil, err
+ }
+ name := authentication.Name
+ if name == nil {
+ return nil, fmt.Errorf("authentication.Name must be provided to Apply")
+ }
+ result = &v1.Authentication{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("authentications").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *authentications) ApplyStatus(ctx context.Context, authentication *configv1.AuthenticationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Authentication, err error) {
+ if authentication == nil {
+ return nil, fmt.Errorf("authentication provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(authentication)
+ if err != nil {
+ return nil, err
+ }
+
+ name := authentication.Name
+ if name == nil {
+ return nil, fmt.Errorf("authentication.Name must be provided to Apply")
+ }
+
+ result = &v1.Authentication{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("authentications").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go
new file mode 100644
index 0000000000..e2d09ef1cb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/build.go
@@ -0,0 +1,181 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BuildsGetter has a method to return a BuildInterface.
+// A group's client should implement this interface.
+type BuildsGetter interface {
+ Builds() BuildInterface
+}
+
+// BuildInterface has methods to work with Build resources.
+type BuildInterface interface {
+ Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (*v1.Build, error)
+ Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (*v1.Build, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Build, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.BuildList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error)
+ Apply(ctx context.Context, build *configv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error)
+ BuildExpansion
+}
+
+// builds implements BuildInterface
+type builds struct {
+ client rest.Interface
+}
+
+// newBuilds returns a Builds
+func newBuilds(c *ConfigV1Client) *builds {
+ return &builds{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the build, and returns the corresponding build object, and an error if there is any.
+func (c *builds) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Build, err error) {
+ result = &v1.Build{}
+ err = c.client.Get().
+ Resource("builds").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Builds that match those selectors.
+func (c *builds) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BuildList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.BuildList{}
+ err = c.client.Get().
+ Resource("builds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested builds.
+func (c *builds) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("builds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any.
+func (c *builds) Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (result *v1.Build, err error) {
+ result = &v1.Build{}
+ err = c.client.Post().
+ Resource("builds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(build).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any.
+func (c *builds) Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (result *v1.Build, err error) {
+ result = &v1.Build{}
+ err = c.client.Put().
+ Resource("builds").
+ Name(build.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(build).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the build and deletes it. Returns an error if one occurs.
+func (c *builds) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("builds").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *builds) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("builds").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched build.
+func (c *builds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error) {
+ result = &v1.Build{}
+ err = c.client.Patch(pt).
+ Resource("builds").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied build.
+func (c *builds) Apply(ctx context.Context, build *configv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) {
+ if build == nil {
+ return nil, fmt.Errorf("build provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(build)
+ if err != nil {
+ return nil, err
+ }
+ name := build.Name
+ if name == nil {
+ return nil, fmt.Errorf("build.Name must be provided to Apply")
+ }
+ result = &v1.Build{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("builds").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go
new file mode 100644
index 0000000000..941a160948
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusteroperator.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterOperatorsGetter has a method to return a ClusterOperatorInterface.
+// A group's client should implement this interface.
+type ClusterOperatorsGetter interface {
+ ClusterOperators() ClusterOperatorInterface
+}
+
+// ClusterOperatorInterface has methods to work with ClusterOperator resources.
+type ClusterOperatorInterface interface {
+ Create(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.CreateOptions) (*v1.ClusterOperator, error)
+ Update(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error)
+ UpdateStatus(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (*v1.ClusterOperator, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterOperator, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterOperatorList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error)
+ Apply(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error)
+ ApplyStatus(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error)
+ ClusterOperatorExpansion
+}
+
+// clusterOperators implements ClusterOperatorInterface
+type clusterOperators struct {
+ client rest.Interface
+}
+
+// newClusterOperators returns a ClusterOperators
+func newClusterOperators(c *ConfigV1Client) *clusterOperators {
+ return &clusterOperators{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the clusterOperator, and returns the corresponding clusterOperator object, and an error if there is any.
+func (c *clusterOperators) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterOperator, err error) {
+ result = &v1.ClusterOperator{}
+ err = c.client.Get().
+ Resource("clusteroperators").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterOperators that match those selectors.
+func (c *clusterOperators) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterOperatorList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ClusterOperatorList{}
+ err = c.client.Get().
+ Resource("clusteroperators").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterOperators.
+func (c *clusterOperators) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("clusteroperators").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterOperator and creates it. Returns the server's representation of the clusterOperator, and an error, if there is any.
+func (c *clusterOperators) Create(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.CreateOptions) (result *v1.ClusterOperator, err error) {
+ result = &v1.ClusterOperator{}
+ err = c.client.Post().
+ Resource("clusteroperators").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterOperator).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterOperator and updates it. Returns the server's representation of the clusterOperator, and an error, if there is any.
+func (c *clusterOperators) Update(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (result *v1.ClusterOperator, err error) {
+ result = &v1.ClusterOperator{}
+ err = c.client.Put().
+ Resource("clusteroperators").
+ Name(clusterOperator.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterOperator).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterOperators) UpdateStatus(ctx context.Context, clusterOperator *v1.ClusterOperator, opts metav1.UpdateOptions) (result *v1.ClusterOperator, err error) {
+ result = &v1.ClusterOperator{}
+ err = c.client.Put().
+ Resource("clusteroperators").
+ Name(clusterOperator.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterOperator).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterOperator and deletes it. Returns an error if one occurs.
+func (c *clusterOperators) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("clusteroperators").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterOperators) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("clusteroperators").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterOperator.
+func (c *clusterOperators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterOperator, err error) {
+ result = &v1.ClusterOperator{}
+ err = c.client.Patch(pt).
+ Resource("clusteroperators").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied clusterOperator.
+func (c *clusterOperators) Apply(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) {
+ if clusterOperator == nil {
+ return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterOperator)
+ if err != nil {
+ return nil, err
+ }
+ name := clusterOperator.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply")
+ }
+ result = &v1.ClusterOperator{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusteroperators").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *clusterOperators) ApplyStatus(ctx context.Context, clusterOperator *configv1.ClusterOperatorApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterOperator, err error) {
+ if clusterOperator == nil {
+ return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterOperator)
+ if err != nil {
+ return nil, err
+ }
+
+ name := clusterOperator.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply")
+ }
+
+ result = &v1.ClusterOperator{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusteroperators").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go
new file mode 100644
index 0000000000..8b7e5b9d24
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clusterversion.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterVersionsGetter has a method to return a ClusterVersionInterface.
+// A group's client should implement this interface.
+type ClusterVersionsGetter interface {
+ ClusterVersions() ClusterVersionInterface
+}
+
+// ClusterVersionInterface has methods to work with ClusterVersion resources.
+type ClusterVersionInterface interface {
+ Create(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.CreateOptions) (*v1.ClusterVersion, error)
+ Update(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error)
+ UpdateStatus(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (*v1.ClusterVersion, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterVersion, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterVersionList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterVersion, err error)
+ Apply(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error)
+ ApplyStatus(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error)
+ ClusterVersionExpansion
+}
+
+// clusterVersions implements ClusterVersionInterface
+type clusterVersions struct {
+ client rest.Interface
+}
+
+// newClusterVersions returns a ClusterVersions
+func newClusterVersions(c *ConfigV1Client) *clusterVersions {
+ return &clusterVersions{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the clusterVersion, and returns the corresponding clusterVersion object, and an error if there is any.
+func (c *clusterVersions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterVersion, err error) {
+ result = &v1.ClusterVersion{}
+ err = c.client.Get().
+ Resource("clusterversions").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterVersions that match those selectors.
+func (c *clusterVersions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterVersionList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ClusterVersionList{}
+ err = c.client.Get().
+ Resource("clusterversions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterVersions.
+func (c *clusterVersions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("clusterversions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterVersion and creates it. Returns the server's representation of the clusterVersion, and an error, if there is any.
+func (c *clusterVersions) Create(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.CreateOptions) (result *v1.ClusterVersion, err error) {
+ result = &v1.ClusterVersion{}
+ err = c.client.Post().
+ Resource("clusterversions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterVersion).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterVersion and updates it. Returns the server's representation of the clusterVersion, and an error, if there is any.
+func (c *clusterVersions) Update(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (result *v1.ClusterVersion, err error) {
+ result = &v1.ClusterVersion{}
+ err = c.client.Put().
+ Resource("clusterversions").
+ Name(clusterVersion.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterVersion).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterVersions) UpdateStatus(ctx context.Context, clusterVersion *v1.ClusterVersion, opts metav1.UpdateOptions) (result *v1.ClusterVersion, err error) {
+ result = &v1.ClusterVersion{}
+ err = c.client.Put().
+ Resource("clusterversions").
+ Name(clusterVersion.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterVersion).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterVersion and deletes it. Returns an error if one occurs.
+func (c *clusterVersions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("clusterversions").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterVersions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("clusterversions").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterVersion.
+func (c *clusterVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterVersion, err error) {
+ result = &v1.ClusterVersion{}
+ err = c.client.Patch(pt).
+ Resource("clusterversions").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied clusterVersion.
+func (c *clusterVersions) Apply(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) {
+ if clusterVersion == nil {
+ return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterVersion)
+ if err != nil {
+ return nil, err
+ }
+ name := clusterVersion.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply")
+ }
+ result = &v1.ClusterVersion{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusterversions").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *clusterVersions) ApplyStatus(ctx context.Context, clusterVersion *configv1.ClusterVersionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterVersion, err error) {
+ if clusterVersion == nil {
+ return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ name := clusterVersion.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply")
+ }
+
+ result = &v1.ClusterVersion{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusterversions").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go
new file mode 100644
index 0000000000..de4f2fa32a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go
@@ -0,0 +1,191 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ v1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type ConfigV1Interface interface {
+ RESTClient() rest.Interface
+ APIServersGetter
+ AuthenticationsGetter
+ BuildsGetter
+ ClusterOperatorsGetter
+ ClusterVersionsGetter
+ ConsolesGetter
+ DNSesGetter
+ FeatureGatesGetter
+ ImagesGetter
+ ImageContentPoliciesGetter
+ ImageDigestMirrorSetsGetter
+ ImageTagMirrorSetsGetter
+ InfrastructuresGetter
+ IngressesGetter
+ NetworksGetter
+ NodesGetter
+ OAuthsGetter
+ OperatorHubsGetter
+ ProjectsGetter
+ ProxiesGetter
+ SchedulersGetter
+}
+
+// ConfigV1Client is used to interact with features provided by the config.openshift.io group.
+type ConfigV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ConfigV1Client) APIServers() APIServerInterface {
+ return newAPIServers(c)
+}
+
+func (c *ConfigV1Client) Authentications() AuthenticationInterface {
+ return newAuthentications(c)
+}
+
+func (c *ConfigV1Client) Builds() BuildInterface {
+ return newBuilds(c)
+}
+
+func (c *ConfigV1Client) ClusterOperators() ClusterOperatorInterface {
+ return newClusterOperators(c)
+}
+
+func (c *ConfigV1Client) ClusterVersions() ClusterVersionInterface {
+ return newClusterVersions(c)
+}
+
+func (c *ConfigV1Client) Consoles() ConsoleInterface {
+ return newConsoles(c)
+}
+
+func (c *ConfigV1Client) DNSes() DNSInterface {
+ return newDNSes(c)
+}
+
+func (c *ConfigV1Client) FeatureGates() FeatureGateInterface {
+ return newFeatureGates(c)
+}
+
+func (c *ConfigV1Client) Images() ImageInterface {
+ return newImages(c)
+}
+
+func (c *ConfigV1Client) ImageContentPolicies() ImageContentPolicyInterface {
+ return newImageContentPolicies(c)
+}
+
+func (c *ConfigV1Client) ImageDigestMirrorSets() ImageDigestMirrorSetInterface {
+ return newImageDigestMirrorSets(c)
+}
+
+func (c *ConfigV1Client) ImageTagMirrorSets() ImageTagMirrorSetInterface {
+ return newImageTagMirrorSets(c)
+}
+
+func (c *ConfigV1Client) Infrastructures() InfrastructureInterface {
+ return newInfrastructures(c)
+}
+
+func (c *ConfigV1Client) Ingresses() IngressInterface {
+ return newIngresses(c)
+}
+
+func (c *ConfigV1Client) Networks() NetworkInterface {
+ return newNetworks(c)
+}
+
+func (c *ConfigV1Client) Nodes() NodeInterface {
+ return newNodes(c)
+}
+
+func (c *ConfigV1Client) OAuths() OAuthInterface {
+ return newOAuths(c)
+}
+
+func (c *ConfigV1Client) OperatorHubs() OperatorHubInterface {
+ return newOperatorHubs(c)
+}
+
+func (c *ConfigV1Client) Projects() ProjectInterface {
+ return newProjects(c)
+}
+
+func (c *ConfigV1Client) Proxies() ProxyInterface {
+ return newProxies(c)
+}
+
+func (c *ConfigV1Client) Schedulers() SchedulerInterface {
+ return newSchedulers(c)
+}
+
+// NewForConfig creates a new ConfigV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ConfigV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ConfigV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ConfigV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ConfigV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ConfigV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ConfigV1Client for the given RESTClient.
+func New(c rest.Interface) *ConfigV1Client {
+ return &ConfigV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ConfigV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go
new file mode 100644
index 0000000000..99c51bf970
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/console.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ConsolesGetter has a method to return a ConsoleInterface.
+// A group's client should implement this interface.
+type ConsolesGetter interface {
+ Consoles() ConsoleInterface
+}
+
+// ConsoleInterface has methods to work with Console resources.
+type ConsoleInterface interface {
+ Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (*v1.Console, error)
+ Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error)
+ UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Console, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ConsoleList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error)
+ Apply(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error)
+ ApplyStatus(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error)
+ ConsoleExpansion
+}
+
+// consoles implements ConsoleInterface
+type consoles struct {
+ client rest.Interface
+}
+
+// newConsoles returns a Consoles
+func newConsoles(c *ConfigV1Client) *consoles {
+ return &consoles{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the console, and returns the corresponding console object, and an error if there is any.
+func (c *consoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Console, err error) {
+ result = &v1.Console{}
+ err = c.client.Get().
+ Resource("consoles").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Consoles that match those selectors.
+func (c *consoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConsoleList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ConsoleList{}
+ err = c.client.Get().
+ Resource("consoles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested consoles.
+func (c *consoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("consoles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a console and creates it. Returns the server's representation of the console, and an error, if there is any.
+func (c *consoles) Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (result *v1.Console, err error) {
+ result = &v1.Console{}
+ err = c.client.Post().
+ Resource("consoles").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(console).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a console and updates it. Returns the server's representation of the console, and an error, if there is any.
+func (c *consoles) Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) {
+ result = &v1.Console{}
+ err = c.client.Put().
+ Resource("consoles").
+ Name(console.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(console).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *consoles) UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) {
+ result = &v1.Console{}
+ err = c.client.Put().
+ Resource("consoles").
+ Name(console.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(console).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the console and deletes it. Returns an error if one occurs.
+func (c *consoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("consoles").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *consoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("consoles").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched console.
+func (c *consoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error) {
+ result = &v1.Console{}
+ err = c.client.Patch(pt).
+ Resource("consoles").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied console.
+func (c *consoles) Apply(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) {
+ if console == nil {
+ return nil, fmt.Errorf("console provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(console)
+ if err != nil {
+ return nil, err
+ }
+ name := console.Name
+ if name == nil {
+ return nil, fmt.Errorf("console.Name must be provided to Apply")
+ }
+ result = &v1.Console{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("consoles").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *consoles) ApplyStatus(ctx context.Context, console *configv1.ConsoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Console, err error) {
+ if console == nil {
+ return nil, fmt.Errorf("console provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(console)
+ if err != nil {
+ return nil, err
+ }
+
+ name := console.Name
+ if name == nil {
+ return nil, fmt.Errorf("console.Name must be provided to Apply")
+ }
+
+ result = &v1.Console{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("consoles").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go
new file mode 100644
index 0000000000..86fbbcf959
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/dns.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// DNSesGetter has a method to return a DNSInterface.
+// A group's client should implement this interface.
+type DNSesGetter interface {
+ DNSes() DNSInterface
+}
+
+// DNSInterface has methods to work with DNS resources.
+type DNSInterface interface {
+ Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (*v1.DNS, error)
+ Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error)
+ UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DNS, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.DNSList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error)
+ Apply(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error)
+ ApplyStatus(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error)
+ DNSExpansion
+}
+
+// dNSes implements DNSInterface
+type dNSes struct {
+ client rest.Interface
+}
+
+// newDNSes returns a DNSes
+func newDNSes(c *ConfigV1Client) *dNSes {
+ return &dNSes{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the dNS, and returns the corresponding dNS object, and an error if there is any.
+func (c *dNSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DNS, err error) {
+ result = &v1.DNS{}
+ err = c.client.Get().
+ Resource("dnses").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of DNSes that match those selectors.
+func (c *dNSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DNSList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.DNSList{}
+ err = c.client.Get().
+ Resource("dnses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested dNSes.
+func (c *dNSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("dnses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a dNS and creates it. Returns the server's representation of the dNS, and an error, if there is any.
+func (c *dNSes) Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (result *v1.DNS, err error) {
+ result = &v1.DNS{}
+ err = c.client.Post().
+ Resource("dnses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dNS).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a dNS and updates it. Returns the server's representation of the dNS, and an error, if there is any.
+func (c *dNSes) Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) {
+ result = &v1.DNS{}
+ err = c.client.Put().
+ Resource("dnses").
+ Name(dNS.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dNS).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *dNSes) UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) {
+ result = &v1.DNS{}
+ err = c.client.Put().
+ Resource("dnses").
+ Name(dNS.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dNS).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the dNS and deletes it. Returns an error if one occurs.
+func (c *dNSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("dnses").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *dNSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("dnses").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched dNS.
+func (c *dNSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) {
+ result = &v1.DNS{}
+ err = c.client.Patch(pt).
+ Resource("dnses").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied dNS.
+func (c *dNSes) Apply(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) {
+ if dNS == nil {
+ return nil, fmt.Errorf("dNS provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(dNS)
+ if err != nil {
+ return nil, err
+ }
+ name := dNS.Name
+ if name == nil {
+ return nil, fmt.Errorf("dNS.Name must be provided to Apply")
+ }
+ result = &v1.DNS{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("dnses").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *dNSes) ApplyStatus(ctx context.Context, dNS *configv1.DNSApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DNS, err error) {
+ if dNS == nil {
+ return nil, fmt.Errorf("dNS provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(dNS)
+ if err != nil {
+ return nil, err
+ }
+
+ name := dNS.Name
+ if name == nil {
+ return nil, fmt.Errorf("dNS.Name must be provided to Apply")
+ }
+
+ result = &v1.DNS{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("dnses").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go
new file mode 100644
index 0000000000..225e6b2be3
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go
new file mode 100644
index 0000000000..112322c84b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// FeatureGatesGetter has a method to return a FeatureGateInterface.
+// A group's client should implement this interface.
+type FeatureGatesGetter interface {
+ FeatureGates() FeatureGateInterface
+}
+
+// FeatureGateInterface has methods to work with FeatureGate resources.
+type FeatureGateInterface interface {
+ Create(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.CreateOptions) (*v1.FeatureGate, error)
+ Update(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error)
+ UpdateStatus(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (*v1.FeatureGate, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FeatureGate, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.FeatureGateList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FeatureGate, err error)
+ Apply(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error)
+ ApplyStatus(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error)
+ FeatureGateExpansion
+}
+
+// featureGates implements FeatureGateInterface
+type featureGates struct {
+ client rest.Interface
+}
+
+// newFeatureGates returns a FeatureGates
+func newFeatureGates(c *ConfigV1Client) *featureGates {
+ return &featureGates{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the featureGate, and returns the corresponding featureGate object, and an error if there is any.
+func (c *featureGates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FeatureGate, err error) {
+ result = &v1.FeatureGate{}
+ err = c.client.Get().
+ Resource("featuregates").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of FeatureGates that match those selectors.
+func (c *featureGates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FeatureGateList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.FeatureGateList{}
+ err = c.client.Get().
+ Resource("featuregates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested featureGates.
+func (c *featureGates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("featuregates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a featureGate and creates it. Returns the server's representation of the featureGate, and an error, if there is any.
+func (c *featureGates) Create(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.CreateOptions) (result *v1.FeatureGate, err error) {
+ result = &v1.FeatureGate{}
+ err = c.client.Post().
+ Resource("featuregates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(featureGate).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a featureGate and updates it. Returns the server's representation of the featureGate, and an error, if there is any.
+func (c *featureGates) Update(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (result *v1.FeatureGate, err error) {
+ result = &v1.FeatureGate{}
+ err = c.client.Put().
+ Resource("featuregates").
+ Name(featureGate.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(featureGate).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *featureGates) UpdateStatus(ctx context.Context, featureGate *v1.FeatureGate, opts metav1.UpdateOptions) (result *v1.FeatureGate, err error) {
+ result = &v1.FeatureGate{}
+ err = c.client.Put().
+ Resource("featuregates").
+ Name(featureGate.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(featureGate).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the featureGate and deletes it. Returns an error if one occurs.
+func (c *featureGates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("featuregates").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *featureGates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("featuregates").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched featureGate.
+func (c *featureGates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FeatureGate, err error) {
+ result = &v1.FeatureGate{}
+ err = c.client.Patch(pt).
+ Resource("featuregates").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied featureGate.
+func (c *featureGates) Apply(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) {
+ if featureGate == nil {
+ return nil, fmt.Errorf("featureGate provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(featureGate)
+ if err != nil {
+ return nil, err
+ }
+ name := featureGate.Name
+ if name == nil {
+ return nil, fmt.Errorf("featureGate.Name must be provided to Apply")
+ }
+ result = &v1.FeatureGate{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("featuregates").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *featureGates) ApplyStatus(ctx context.Context, featureGate *configv1.FeatureGateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FeatureGate, err error) {
+ if featureGate == nil {
+ return nil, fmt.Errorf("featureGate provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(featureGate)
+ if err != nil {
+ return nil, err
+ }
+
+ name := featureGate.Name
+ if name == nil {
+ return nil, fmt.Errorf("featureGate.Name must be provided to Apply")
+ }
+
+ result = &v1.FeatureGate{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("featuregates").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go
new file mode 100644
index 0000000000..a56721ba9d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go
@@ -0,0 +1,45 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type APIServerExpansion interface{}
+
+type AuthenticationExpansion interface{}
+
+type BuildExpansion interface{}
+
+type ClusterOperatorExpansion interface{}
+
+type ClusterVersionExpansion interface{}
+
+type ConsoleExpansion interface{}
+
+type DNSExpansion interface{}
+
+type FeatureGateExpansion interface{}
+
+type ImageExpansion interface{}
+
+type ImageContentPolicyExpansion interface{}
+
+type ImageDigestMirrorSetExpansion interface{}
+
+type ImageTagMirrorSetExpansion interface{}
+
+type InfrastructureExpansion interface{}
+
+type IngressExpansion interface{}
+
+type NetworkExpansion interface{}
+
+type NodeExpansion interface{}
+
+type OAuthExpansion interface{}
+
+type OperatorHubExpansion interface{}
+
+type ProjectExpansion interface{}
+
+type ProxyExpansion interface{}
+
+type SchedulerExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go
new file mode 100644
index 0000000000..5357f96d8a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/image.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ImagesGetter has a method to return a ImageInterface.
+// A group's client should implement this interface.
+type ImagesGetter interface {
+ Images() ImageInterface
+}
+
+// ImageInterface has methods to work with Image resources.
+type ImageInterface interface {
+ Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (*v1.Image, error)
+ Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error)
+ UpdateStatus(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Image, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error)
+ Apply(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error)
+ ApplyStatus(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error)
+ ImageExpansion
+}
+
+// images implements ImageInterface
+type images struct {
+ client rest.Interface
+}
+
+// newImages returns a Images
+func newImages(c *ConfigV1Client) *images {
+ return &images{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the image, and returns the corresponding image object, and an error if there is any.
+func (c *images) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Image, err error) {
+ result = &v1.Image{}
+ err = c.client.Get().
+ Resource("images").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Images that match those selectors.
+func (c *images) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ImageList{}
+ err = c.client.Get().
+ Resource("images").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested images.
+func (c *images) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("images").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any.
+func (c *images) Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (result *v1.Image, err error) {
+ result = &v1.Image{}
+ err = c.client.Post().
+ Resource("images").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(image).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any.
+func (c *images) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) {
+ result = &v1.Image{}
+ err = c.client.Put().
+ Resource("images").
+ Name(image.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(image).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *images) UpdateStatus(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) {
+ result = &v1.Image{}
+ err = c.client.Put().
+ Resource("images").
+ Name(image.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(image).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the image and deletes it. Returns an error if one occurs.
+func (c *images) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("images").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *images) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("images").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched image.
+func (c *images) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) {
+ result = &v1.Image{}
+ err = c.client.Patch(pt).
+ Resource("images").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied image.
+func (c *images) Apply(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) {
+ if image == nil {
+ return nil, fmt.Errorf("image provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(image)
+ if err != nil {
+ return nil, err
+ }
+ name := image.Name
+ if name == nil {
+ return nil, fmt.Errorf("image.Name must be provided to Apply")
+ }
+ result = &v1.Image{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("images").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *images) ApplyStatus(ctx context.Context, image *configv1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) {
+ if image == nil {
+ return nil, fmt.Errorf("image provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(image)
+ if err != nil {
+ return nil, err
+ }
+
+ name := image.Name
+ if name == nil {
+ return nil, fmt.Errorf("image.Name must be provided to Apply")
+ }
+
+ result = &v1.Image{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("images").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go
new file mode 100644
index 0000000000..3128290ca5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go
@@ -0,0 +1,181 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ImageContentPoliciesGetter has a method to return a ImageContentPolicyInterface.
+// A group's client should implement this interface.
+type ImageContentPoliciesGetter interface {
+ ImageContentPolicies() ImageContentPolicyInterface
+}
+
+// ImageContentPolicyInterface has methods to work with ImageContentPolicy resources.
+type ImageContentPolicyInterface interface {
+ Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (*v1.ImageContentPolicy, error)
+ Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (*v1.ImageContentPolicy, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageContentPolicy, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageContentPolicyList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error)
+ Apply(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageContentPolicy, err error)
+ ImageContentPolicyExpansion
+}
+
+// imageContentPolicies implements ImageContentPolicyInterface
+type imageContentPolicies struct {
+ client rest.Interface
+}
+
+// newImageContentPolicies returns a ImageContentPolicies
+func newImageContentPolicies(c *ConfigV1Client) *imageContentPolicies {
+ return &imageContentPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the imageContentPolicy, and returns the corresponding imageContentPolicy object, and an error if there is any.
+func (c *imageContentPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageContentPolicy, err error) {
+ result = &v1.ImageContentPolicy{}
+ err = c.client.Get().
+ Resource("imagecontentpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ImageContentPolicies that match those selectors.
+func (c *imageContentPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageContentPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ImageContentPolicyList{}
+ err = c.client.Get().
+ Resource("imagecontentpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested imageContentPolicies.
+func (c *imageContentPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("imagecontentpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a imageContentPolicy and creates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any.
+func (c *imageContentPolicies) Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (result *v1.ImageContentPolicy, err error) {
+ result = &v1.ImageContentPolicy{}
+ err = c.client.Post().
+ Resource("imagecontentpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageContentPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a imageContentPolicy and updates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any.
+func (c *imageContentPolicies) Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (result *v1.ImageContentPolicy, err error) {
+ result = &v1.ImageContentPolicy{}
+ err = c.client.Put().
+ Resource("imagecontentpolicies").
+ Name(imageContentPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageContentPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the imageContentPolicy and deletes it. Returns an error if one occurs.
+func (c *imageContentPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("imagecontentpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *imageContentPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("imagecontentpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched imageContentPolicy.
+func (c *imageContentPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error) {
+ result = &v1.ImageContentPolicy{}
+ err = c.client.Patch(pt).
+ Resource("imagecontentpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied imageContentPolicy.
+func (c *imageContentPolicies) Apply(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageContentPolicy, err error) {
+ if imageContentPolicy == nil {
+ return nil, fmt.Errorf("imageContentPolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imageContentPolicy)
+ if err != nil {
+ return nil, err
+ }
+ name := imageContentPolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("imageContentPolicy.Name must be provided to Apply")
+ }
+ result = &v1.ImageContentPolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("imagecontentpolicies").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go
new file mode 100644
index 0000000000..65e01a8446
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagedigestmirrorset.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ImageDigestMirrorSetsGetter has a method to return a ImageDigestMirrorSetInterface.
+// A group's client should implement this interface.
+type ImageDigestMirrorSetsGetter interface {
+ ImageDigestMirrorSets() ImageDigestMirrorSetInterface
+}
+
+// ImageDigestMirrorSetInterface has methods to work with ImageDigestMirrorSet resources.
+type ImageDigestMirrorSetInterface interface {
+ Create(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.CreateOptions) (*v1.ImageDigestMirrorSet, error)
+ Update(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error)
+ UpdateStatus(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (*v1.ImageDigestMirrorSet, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageDigestMirrorSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageDigestMirrorSetList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageDigestMirrorSet, err error)
+ Apply(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error)
+ ApplyStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error)
+ ImageDigestMirrorSetExpansion
+}
+
+// imageDigestMirrorSets implements ImageDigestMirrorSetInterface
+type imageDigestMirrorSets struct {
+ client rest.Interface
+}
+
+// newImageDigestMirrorSets returns a ImageDigestMirrorSets
+func newImageDigestMirrorSets(c *ConfigV1Client) *imageDigestMirrorSets {
+ return &imageDigestMirrorSets{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the imageDigestMirrorSet, and returns the corresponding imageDigestMirrorSet object, and an error if there is any.
+func (c *imageDigestMirrorSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Get().
+ Resource("imagedigestmirrorsets").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ImageDigestMirrorSets that match those selectors.
+func (c *imageDigestMirrorSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageDigestMirrorSetList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ImageDigestMirrorSetList{}
+ err = c.client.Get().
+ Resource("imagedigestmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested imageDigestMirrorSets.
+func (c *imageDigestMirrorSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("imagedigestmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a imageDigestMirrorSet and creates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any.
+func (c *imageDigestMirrorSets) Create(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.CreateOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Post().
+ Resource("imagedigestmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageDigestMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a imageDigestMirrorSet and updates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any.
+func (c *imageDigestMirrorSets) Update(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Put().
+ Resource("imagedigestmirrorsets").
+ Name(imageDigestMirrorSet.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageDigestMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *imageDigestMirrorSets) UpdateStatus(ctx context.Context, imageDigestMirrorSet *v1.ImageDigestMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Put().
+ Resource("imagedigestmirrorsets").
+ Name(imageDigestMirrorSet.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageDigestMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the imageDigestMirrorSet and deletes it. Returns an error if one occurs.
+func (c *imageDigestMirrorSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("imagedigestmirrorsets").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *imageDigestMirrorSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("imagedigestmirrorsets").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched imageDigestMirrorSet.
+func (c *imageDigestMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageDigestMirrorSet, err error) {
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Patch(pt).
+ Resource("imagedigestmirrorsets").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied imageDigestMirrorSet.
+func (c *imageDigestMirrorSets) Apply(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ if imageDigestMirrorSet == nil {
+ return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imageDigestMirrorSet)
+ if err != nil {
+ return nil, err
+ }
+ name := imageDigestMirrorSet.Name
+ if name == nil {
+ return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply")
+ }
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("imagedigestmirrorsets").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *imageDigestMirrorSets) ApplyStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageDigestMirrorSet, err error) {
+ if imageDigestMirrorSet == nil {
+ return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imageDigestMirrorSet)
+ if err != nil {
+ return nil, err
+ }
+
+ name := imageDigestMirrorSet.Name
+ if name == nil {
+ return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply")
+ }
+
+ result = &v1.ImageDigestMirrorSet{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("imagedigestmirrorsets").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go
new file mode 100644
index 0000000000..dc8337970d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagetagmirrorset.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ImageTagMirrorSetsGetter has a method to return a ImageTagMirrorSetInterface.
+// A group's client should implement this interface.
+type ImageTagMirrorSetsGetter interface {
+ ImageTagMirrorSets() ImageTagMirrorSetInterface
+}
+
+// ImageTagMirrorSetInterface has methods to work with ImageTagMirrorSet resources.
+type ImageTagMirrorSetInterface interface {
+ Create(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.CreateOptions) (*v1.ImageTagMirrorSet, error)
+ Update(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error)
+ UpdateStatus(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (*v1.ImageTagMirrorSet, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageTagMirrorSet, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageTagMirrorSetList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageTagMirrorSet, err error)
+ Apply(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error)
+ ApplyStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error)
+ ImageTagMirrorSetExpansion
+}
+
+// imageTagMirrorSets implements ImageTagMirrorSetInterface
+type imageTagMirrorSets struct {
+ client rest.Interface
+}
+
+// newImageTagMirrorSets returns a ImageTagMirrorSets
+func newImageTagMirrorSets(c *ConfigV1Client) *imageTagMirrorSets {
+ return &imageTagMirrorSets{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the imageTagMirrorSet, and returns the corresponding imageTagMirrorSet object, and an error if there is any.
+func (c *imageTagMirrorSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageTagMirrorSet, err error) {
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Get().
+ Resource("imagetagmirrorsets").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ImageTagMirrorSets that match those selectors.
+func (c *imageTagMirrorSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageTagMirrorSetList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ImageTagMirrorSetList{}
+ err = c.client.Get().
+ Resource("imagetagmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested imageTagMirrorSets.
+func (c *imageTagMirrorSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("imagetagmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a imageTagMirrorSet and creates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any.
+func (c *imageTagMirrorSets) Create(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.CreateOptions) (result *v1.ImageTagMirrorSet, err error) {
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Post().
+ Resource("imagetagmirrorsets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageTagMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a imageTagMirrorSet and updates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any.
+func (c *imageTagMirrorSets) Update(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageTagMirrorSet, err error) {
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Put().
+ Resource("imagetagmirrorsets").
+ Name(imageTagMirrorSet.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageTagMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *imageTagMirrorSets) UpdateStatus(ctx context.Context, imageTagMirrorSet *v1.ImageTagMirrorSet, opts metav1.UpdateOptions) (result *v1.ImageTagMirrorSet, err error) {
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Put().
+ Resource("imagetagmirrorsets").
+ Name(imageTagMirrorSet.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imageTagMirrorSet).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the imageTagMirrorSet and deletes it. Returns an error if one occurs.
+func (c *imageTagMirrorSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("imagetagmirrorsets").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *imageTagMirrorSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("imagetagmirrorsets").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched imageTagMirrorSet.
+func (c *imageTagMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageTagMirrorSet, err error) {
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Patch(pt).
+ Resource("imagetagmirrorsets").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied imageTagMirrorSet.
+func (c *imageTagMirrorSets) Apply(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) {
+ if imageTagMirrorSet == nil {
+ return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imageTagMirrorSet)
+ if err != nil {
+ return nil, err
+ }
+ name := imageTagMirrorSet.Name
+ if name == nil {
+ return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply")
+ }
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("imagetagmirrorsets").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *imageTagMirrorSets) ApplyStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageTagMirrorSet, err error) {
+ if imageTagMirrorSet == nil {
+ return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imageTagMirrorSet)
+ if err != nil {
+ return nil, err
+ }
+
+ name := imageTagMirrorSet.Name
+ if name == nil {
+ return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply")
+ }
+
+ result = &v1.ImageTagMirrorSet{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("imagetagmirrorsets").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go
new file mode 100644
index 0000000000..c3728aa83d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/infrastructure.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// InfrastructuresGetter has a method to return a InfrastructureInterface.
+// A group's client should implement this interface.
+type InfrastructuresGetter interface {
+ Infrastructures() InfrastructureInterface
+}
+
+// InfrastructureInterface has methods to work with Infrastructure resources.
+type InfrastructureInterface interface {
+ Create(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.CreateOptions) (*v1.Infrastructure, error)
+ Update(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error)
+ UpdateStatus(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (*v1.Infrastructure, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Infrastructure, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.InfrastructureList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Infrastructure, err error)
+ Apply(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error)
+ ApplyStatus(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error)
+ InfrastructureExpansion
+}
+
+// infrastructures implements InfrastructureInterface
+type infrastructures struct {
+ client rest.Interface
+}
+
+// newInfrastructures returns a Infrastructures
+func newInfrastructures(c *ConfigV1Client) *infrastructures {
+ return &infrastructures{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the infrastructure, and returns the corresponding infrastructure object, and an error if there is any.
+func (c *infrastructures) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Infrastructure, err error) {
+ result = &v1.Infrastructure{}
+ err = c.client.Get().
+ Resource("infrastructures").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Infrastructures that match those selectors.
+func (c *infrastructures) List(ctx context.Context, opts metav1.ListOptions) (result *v1.InfrastructureList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.InfrastructureList{}
+ err = c.client.Get().
+ Resource("infrastructures").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested infrastructures.
+func (c *infrastructures) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("infrastructures").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a infrastructure and creates it. Returns the server's representation of the infrastructure, and an error, if there is any.
+func (c *infrastructures) Create(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.CreateOptions) (result *v1.Infrastructure, err error) {
+ result = &v1.Infrastructure{}
+ err = c.client.Post().
+ Resource("infrastructures").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(infrastructure).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a infrastructure and updates it. Returns the server's representation of the infrastructure, and an error, if there is any.
+func (c *infrastructures) Update(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (result *v1.Infrastructure, err error) {
+ result = &v1.Infrastructure{}
+ err = c.client.Put().
+ Resource("infrastructures").
+ Name(infrastructure.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(infrastructure).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *infrastructures) UpdateStatus(ctx context.Context, infrastructure *v1.Infrastructure, opts metav1.UpdateOptions) (result *v1.Infrastructure, err error) {
+ result = &v1.Infrastructure{}
+ err = c.client.Put().
+ Resource("infrastructures").
+ Name(infrastructure.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(infrastructure).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the infrastructure and deletes it. Returns an error if one occurs.
+func (c *infrastructures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("infrastructures").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *infrastructures) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("infrastructures").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched infrastructure.
+func (c *infrastructures) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Infrastructure, err error) {
+ result = &v1.Infrastructure{}
+ err = c.client.Patch(pt).
+ Resource("infrastructures").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied infrastructure.
+func (c *infrastructures) Apply(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) {
+ if infrastructure == nil {
+ return nil, fmt.Errorf("infrastructure provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(infrastructure)
+ if err != nil {
+ return nil, err
+ }
+ name := infrastructure.Name
+ if name == nil {
+ return nil, fmt.Errorf("infrastructure.Name must be provided to Apply")
+ }
+ result = &v1.Infrastructure{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("infrastructures").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *infrastructures) ApplyStatus(ctx context.Context, infrastructure *configv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Infrastructure, err error) {
+ if infrastructure == nil {
+ return nil, fmt.Errorf("infrastructure provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(infrastructure)
+ if err != nil {
+ return nil, err
+ }
+
+ name := infrastructure.Name
+ if name == nil {
+ return nil, fmt.Errorf("infrastructure.Name must be provided to Apply")
+ }
+
+ result = &v1.Infrastructure{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("infrastructures").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go
new file mode 100644
index 0000000000..4d909f8842
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/ingress.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// IngressesGetter has a method to return a IngressInterface.
+// A group's client should implement this interface.
+type IngressesGetter interface {
+ Ingresses() IngressInterface
+}
+
+// IngressInterface has methods to work with Ingress resources.
+type IngressInterface interface {
+ Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error)
+ Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error)
+ Apply(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
+ ApplyStatus(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error)
+ IngressExpansion
+}
+
+// ingresses implements IngressInterface
+type ingresses struct {
+ client rest.Interface
+}
+
+// newIngresses returns a Ingresses
+func newIngresses(c *ConfigV1Client) *ingresses {
+ return &ingresses{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
+func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Get().
+ Resource("ingresses").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
+func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.IngressList{}
+ err = c.client.Get().
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ingresses.
+func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Post().
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Put().
+ Resource("ingresses").
+ Name(ingress.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Put().
+ Resource("ingresses").
+ Name(ingress.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ingresses").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ingresses").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ingress.
+func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Patch(pt).
+ Resource("ingresses").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied ingress.
+func (c *ingresses) Apply(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) {
+ if ingress == nil {
+ return nil, fmt.Errorf("ingress provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(ingress)
+ if err != nil {
+ return nil, err
+ }
+ name := ingress.Name
+ if name == nil {
+ return nil, fmt.Errorf("ingress.Name must be provided to Apply")
+ }
+ result = &v1.Ingress{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("ingresses").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *ingresses) ApplyStatus(ctx context.Context, ingress *configv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) {
+ if ingress == nil {
+ return nil, fmt.Errorf("ingress provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(ingress)
+ if err != nil {
+ return nil, err
+ }
+
+ name := ingress.Name
+ if name == nil {
+ return nil, fmt.Errorf("ingress.Name must be provided to Apply")
+ }
+
+ result = &v1.Ingress{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("ingresses").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go
new file mode 100644
index 0000000000..d9d8a4e475
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/network.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NetworksGetter has a method to return a NetworkInterface.
+// A group's client should implement this interface.
+type NetworksGetter interface {
+ Networks() NetworkInterface
+}
+
+// NetworkInterface has methods to work with Network resources.
+type NetworkInterface interface {
+ Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (*v1.Network, error)
+ Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error)
+ UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Network, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error)
+ Apply(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error)
+ ApplyStatus(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error)
+ NetworkExpansion
+}
+
+// networks implements NetworkInterface
+type networks struct {
+ client rest.Interface
+}
+
+// newNetworks returns a Networks
+func newNetworks(c *ConfigV1Client) *networks {
+ return &networks{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the network, and returns the corresponding network object, and an error if there is any.
+func (c *networks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Network, err error) {
+ result = &v1.Network{}
+ err = c.client.Get().
+ Resource("networks").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Networks that match those selectors.
+func (c *networks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NetworkList{}
+ err = c.client.Get().
+ Resource("networks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested networks.
+func (c *networks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("networks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any.
+func (c *networks) Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (result *v1.Network, err error) {
+ result = &v1.Network{}
+ err = c.client.Post().
+ Resource("networks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(network).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any.
+func (c *networks) Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) {
+ result = &v1.Network{}
+ err = c.client.Put().
+ Resource("networks").
+ Name(network.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(network).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *networks) UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) {
+ result = &v1.Network{}
+ err = c.client.Put().
+ Resource("networks").
+ Name(network.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(network).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the network and deletes it. Returns an error if one occurs.
+func (c *networks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("networks").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *networks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("networks").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched network.
+func (c *networks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) {
+ result = &v1.Network{}
+ err = c.client.Patch(pt).
+ Resource("networks").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied network.
+func (c *networks) Apply(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) {
+ if network == nil {
+ return nil, fmt.Errorf("network provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(network)
+ if err != nil {
+ return nil, err
+ }
+ name := network.Name
+ if name == nil {
+ return nil, fmt.Errorf("network.Name must be provided to Apply")
+ }
+ result = &v1.Network{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("networks").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *networks) ApplyStatus(ctx context.Context, network *configv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Network, err error) {
+ if network == nil {
+ return nil, fmt.Errorf("network provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(network)
+ if err != nil {
+ return nil, err
+ }
+
+ name := network.Name
+ if name == nil {
+ return nil, fmt.Errorf("network.Name must be provided to Apply")
+ }
+
+ result = &v1.Network{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("networks").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go
new file mode 100644
index 0000000000..6c7969c5ad
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/node.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NodesGetter has a method to return a NodeInterface.
+// A group's client should implement this interface.
+type NodesGetter interface {
+ Nodes() NodeInterface
+}
+
+// NodeInterface has methods to work with Node resources.
+type NodeInterface interface {
+ Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error)
+ Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error)
+ Apply(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
+ ApplyStatus(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error)
+ NodeExpansion
+}
+
+// nodes implements NodeInterface
+type nodes struct {
+ client rest.Interface
+}
+
+// newNodes returns a Nodes
+func newNodes(c *ConfigV1Client) *nodes {
+ return &nodes{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the node, and returns the corresponding node object, and an error if there is any.
+func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Get().
+ Resource("nodes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Nodes that match those selectors.
+func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NodeList{}
+ err = c.client.Get().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Post().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Put().
+ Resource("nodes").
+ Name(node.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Put().
+ Resource("nodes").
+ Name(node.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the node and deletes it. Returns an error if one occurs.
+func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("nodes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("nodes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Patch(pt).
+ Resource("nodes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied node.
+func (c *nodes) Apply(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) {
+ if node == nil {
+ return nil, fmt.Errorf("node provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(node)
+ if err != nil {
+ return nil, err
+ }
+ name := node.Name
+ if name == nil {
+ return nil, fmt.Errorf("node.Name must be provided to Apply")
+ }
+ result = &v1.Node{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("nodes").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *nodes) ApplyStatus(ctx context.Context, node *configv1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) {
+ if node == nil {
+ return nil, fmt.Errorf("node provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(node)
+ if err != nil {
+ return nil, err
+ }
+
+ name := node.Name
+ if name == nil {
+ return nil, fmt.Errorf("node.Name must be provided to Apply")
+ }
+
+ result = &v1.Node{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("nodes").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go
new file mode 100644
index 0000000000..b418cc0469
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/oauth.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// OAuthsGetter has a method to return a OAuthInterface.
+// A group's client should implement this interface.
+type OAuthsGetter interface {
+ OAuths() OAuthInterface
+}
+
+// OAuthInterface has methods to work with OAuth resources.
+type OAuthInterface interface {
+ Create(ctx context.Context, oAuth *v1.OAuth, opts metav1.CreateOptions) (*v1.OAuth, error)
+ Update(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error)
+ UpdateStatus(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (*v1.OAuth, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OAuth, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.OAuthList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OAuth, err error)
+ Apply(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error)
+ ApplyStatus(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error)
+ OAuthExpansion
+}
+
+// oAuths implements OAuthInterface
+type oAuths struct {
+ client rest.Interface
+}
+
+// newOAuths returns a OAuths
+func newOAuths(c *ConfigV1Client) *oAuths {
+ return &oAuths{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the oAuth, and returns the corresponding oAuth object, and an error if there is any.
+func (c *oAuths) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OAuth, err error) {
+ result = &v1.OAuth{}
+ err = c.client.Get().
+ Resource("oauths").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of OAuths that match those selectors.
+func (c *oAuths) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OAuthList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.OAuthList{}
+ err = c.client.Get().
+ Resource("oauths").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested oAuths.
+func (c *oAuths) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("oauths").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a oAuth and creates it. Returns the server's representation of the oAuth, and an error, if there is any.
+func (c *oAuths) Create(ctx context.Context, oAuth *v1.OAuth, opts metav1.CreateOptions) (result *v1.OAuth, err error) {
+ result = &v1.OAuth{}
+ err = c.client.Post().
+ Resource("oauths").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(oAuth).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a oAuth and updates it. Returns the server's representation of the oAuth, and an error, if there is any.
+func (c *oAuths) Update(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (result *v1.OAuth, err error) {
+ result = &v1.OAuth{}
+ err = c.client.Put().
+ Resource("oauths").
+ Name(oAuth.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(oAuth).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *oAuths) UpdateStatus(ctx context.Context, oAuth *v1.OAuth, opts metav1.UpdateOptions) (result *v1.OAuth, err error) {
+ result = &v1.OAuth{}
+ err = c.client.Put().
+ Resource("oauths").
+ Name(oAuth.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(oAuth).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the oAuth and deletes it. Returns an error if one occurs.
+func (c *oAuths) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("oauths").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *oAuths) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("oauths").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched oAuth.
+func (c *oAuths) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OAuth, err error) {
+ result = &v1.OAuth{}
+ err = c.client.Patch(pt).
+ Resource("oauths").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied oAuth.
+func (c *oAuths) Apply(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) {
+ if oAuth == nil {
+ return nil, fmt.Errorf("oAuth provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(oAuth)
+ if err != nil {
+ return nil, err
+ }
+ name := oAuth.Name
+ if name == nil {
+ return nil, fmt.Errorf("oAuth.Name must be provided to Apply")
+ }
+ result = &v1.OAuth{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("oauths").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *oAuths) ApplyStatus(ctx context.Context, oAuth *configv1.OAuthApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OAuth, err error) {
+ if oAuth == nil {
+ return nil, fmt.Errorf("oAuth provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(oAuth)
+ if err != nil {
+ return nil, err
+ }
+
+ name := oAuth.Name
+ if name == nil {
+ return nil, fmt.Errorf("oAuth.Name must be provided to Apply")
+ }
+
+ result = &v1.OAuth{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("oauths").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go
new file mode 100644
index 0000000000..67b7e0f893
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/operatorhub.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// OperatorHubsGetter has a method to return a OperatorHubInterface.
+// A group's client should implement this interface.
+type OperatorHubsGetter interface {
+ OperatorHubs() OperatorHubInterface
+}
+
+// OperatorHubInterface has methods to work with OperatorHub resources.
+type OperatorHubInterface interface {
+ Create(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.CreateOptions) (*v1.OperatorHub, error)
+ Update(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error)
+ UpdateStatus(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (*v1.OperatorHub, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OperatorHub, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.OperatorHubList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OperatorHub, err error)
+ Apply(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error)
+ ApplyStatus(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error)
+ OperatorHubExpansion
+}
+
+// operatorHubs implements OperatorHubInterface
+type operatorHubs struct {
+ client rest.Interface
+}
+
+// newOperatorHubs returns a OperatorHubs
+func newOperatorHubs(c *ConfigV1Client) *operatorHubs {
+ return &operatorHubs{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the operatorHub, and returns the corresponding operatorHub object, and an error if there is any.
+func (c *operatorHubs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OperatorHub, err error) {
+ result = &v1.OperatorHub{}
+ err = c.client.Get().
+ Resource("operatorhubs").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of OperatorHubs that match those selectors.
+func (c *operatorHubs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OperatorHubList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.OperatorHubList{}
+ err = c.client.Get().
+ Resource("operatorhubs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested operatorHubs.
+func (c *operatorHubs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("operatorhubs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a operatorHub and creates it. Returns the server's representation of the operatorHub, and an error, if there is any.
+func (c *operatorHubs) Create(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.CreateOptions) (result *v1.OperatorHub, err error) {
+ result = &v1.OperatorHub{}
+ err = c.client.Post().
+ Resource("operatorhubs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(operatorHub).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a operatorHub and updates it. Returns the server's representation of the operatorHub, and an error, if there is any.
+func (c *operatorHubs) Update(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (result *v1.OperatorHub, err error) {
+ result = &v1.OperatorHub{}
+ err = c.client.Put().
+ Resource("operatorhubs").
+ Name(operatorHub.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(operatorHub).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *operatorHubs) UpdateStatus(ctx context.Context, operatorHub *v1.OperatorHub, opts metav1.UpdateOptions) (result *v1.OperatorHub, err error) {
+ result = &v1.OperatorHub{}
+ err = c.client.Put().
+ Resource("operatorhubs").
+ Name(operatorHub.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(operatorHub).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the operatorHub and deletes it. Returns an error if one occurs.
+func (c *operatorHubs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("operatorhubs").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *operatorHubs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("operatorhubs").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched operatorHub.
+func (c *operatorHubs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OperatorHub, err error) {
+ result = &v1.OperatorHub{}
+ err = c.client.Patch(pt).
+ Resource("operatorhubs").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied operatorHub.
+func (c *operatorHubs) Apply(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) {
+ if operatorHub == nil {
+ return nil, fmt.Errorf("operatorHub provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(operatorHub)
+ if err != nil {
+ return nil, err
+ }
+ name := operatorHub.Name
+ if name == nil {
+ return nil, fmt.Errorf("operatorHub.Name must be provided to Apply")
+ }
+ result = &v1.OperatorHub{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("operatorhubs").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *operatorHubs) ApplyStatus(ctx context.Context, operatorHub *configv1.OperatorHubApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OperatorHub, err error) {
+ if operatorHub == nil {
+ return nil, fmt.Errorf("operatorHub provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(operatorHub)
+ if err != nil {
+ return nil, err
+ }
+
+ name := operatorHub.Name
+ if name == nil {
+ return nil, fmt.Errorf("operatorHub.Name must be provided to Apply")
+ }
+
+ result = &v1.OperatorHub{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("operatorhubs").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go
new file mode 100644
index 0000000000..cada42734b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/project.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ProjectsGetter has a method to return a ProjectInterface.
+// A group's client should implement this interface.
+type ProjectsGetter interface {
+ Projects() ProjectInterface
+}
+
+// ProjectInterface has methods to work with Project resources.
+type ProjectInterface interface {
+ Create(ctx context.Context, project *v1.Project, opts metav1.CreateOptions) (*v1.Project, error)
+ Update(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error)
+ UpdateStatus(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (*v1.Project, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Project, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ProjectList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Project, err error)
+ Apply(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error)
+ ApplyStatus(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error)
+ ProjectExpansion
+}
+
+// projects implements ProjectInterface
+type projects struct {
+ client rest.Interface
+}
+
+// newProjects returns a Projects
+func newProjects(c *ConfigV1Client) *projects {
+ return &projects{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the project, and returns the corresponding project object, and an error if there is any.
+func (c *projects) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Project, err error) {
+ result = &v1.Project{}
+ err = c.client.Get().
+ Resource("projects").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Projects that match those selectors.
+func (c *projects) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProjectList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ProjectList{}
+ err = c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested projects.
+func (c *projects) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Create(ctx context.Context, project *v1.Project, opts metav1.CreateOptions) (result *v1.Project, err error) {
+ result = &v1.Project{}
+ err = c.client.Post().
+ Resource("projects").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any.
+func (c *projects) Update(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (result *v1.Project, err error) {
+ result = &v1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *projects) UpdateStatus(ctx context.Context, project *v1.Project, opts metav1.UpdateOptions) (result *v1.Project, err error) {
+ result = &v1.Project{}
+ err = c.client.Put().
+ Resource("projects").
+ Name(project.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(project).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the project and deletes it. Returns an error if one occurs.
+func (c *projects) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("projects").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *projects) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("projects").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched project.
+func (c *projects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Project, err error) {
+ result = &v1.Project{}
+ err = c.client.Patch(pt).
+ Resource("projects").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied project.
+func (c *projects) Apply(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) {
+ if project == nil {
+ return nil, fmt.Errorf("project provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(project)
+ if err != nil {
+ return nil, err
+ }
+ name := project.Name
+ if name == nil {
+ return nil, fmt.Errorf("project.Name must be provided to Apply")
+ }
+ result = &v1.Project{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("projects").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *projects) ApplyStatus(ctx context.Context, project *configv1.ProjectApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Project, err error) {
+ if project == nil {
+ return nil, fmt.Errorf("project provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(project)
+ if err != nil {
+ return nil, err
+ }
+
+ name := project.Name
+ if name == nil {
+ return nil, fmt.Errorf("project.Name must be provided to Apply")
+ }
+
+ result = &v1.Project{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("projects").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go
new file mode 100644
index 0000000000..a187dc8fc7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ProxiesGetter has a method to return a ProxyInterface.
+// A group's client should implement this interface.
+type ProxiesGetter interface {
+ Proxies() ProxyInterface
+}
+
+// ProxyInterface has methods to work with Proxy resources.
+type ProxyInterface interface {
+ Create(ctx context.Context, proxy *v1.Proxy, opts metav1.CreateOptions) (*v1.Proxy, error)
+ Update(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error)
+ UpdateStatus(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (*v1.Proxy, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Proxy, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ProxyList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Proxy, err error)
+ Apply(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error)
+ ApplyStatus(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error)
+ ProxyExpansion
+}
+
+// proxies implements ProxyInterface
+type proxies struct {
+ client rest.Interface
+}
+
+// newProxies returns a Proxies
+func newProxies(c *ConfigV1Client) *proxies {
+ return &proxies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the proxy, and returns the corresponding proxy object, and an error if there is any.
+func (c *proxies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Proxy, err error) {
+ result = &v1.Proxy{}
+ err = c.client.Get().
+ Resource("proxies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Proxies that match those selectors.
+func (c *proxies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ProxyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ProxyList{}
+ err = c.client.Get().
+ Resource("proxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested proxies.
+func (c *proxies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("proxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a proxy and creates it. Returns the server's representation of the proxy, and an error, if there is any.
+func (c *proxies) Create(ctx context.Context, proxy *v1.Proxy, opts metav1.CreateOptions) (result *v1.Proxy, err error) {
+ result = &v1.Proxy{}
+ err = c.client.Post().
+ Resource("proxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(proxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a proxy and updates it. Returns the server's representation of the proxy, and an error, if there is any.
+func (c *proxies) Update(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (result *v1.Proxy, err error) {
+ result = &v1.Proxy{}
+ err = c.client.Put().
+ Resource("proxies").
+ Name(proxy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(proxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *proxies) UpdateStatus(ctx context.Context, proxy *v1.Proxy, opts metav1.UpdateOptions) (result *v1.Proxy, err error) {
+ result = &v1.Proxy{}
+ err = c.client.Put().
+ Resource("proxies").
+ Name(proxy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(proxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the proxy and deletes it. Returns an error if one occurs.
+func (c *proxies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("proxies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *proxies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("proxies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched proxy.
+func (c *proxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Proxy, err error) {
+ result = &v1.Proxy{}
+ err = c.client.Patch(pt).
+ Resource("proxies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied proxy.
+func (c *proxies) Apply(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) {
+ if proxy == nil {
+ return nil, fmt.Errorf("proxy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(proxy)
+ if err != nil {
+ return nil, err
+ }
+ name := proxy.Name
+ if name == nil {
+ return nil, fmt.Errorf("proxy.Name must be provided to Apply")
+ }
+ result = &v1.Proxy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("proxies").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *proxies) ApplyStatus(ctx context.Context, proxy *configv1.ProxyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Proxy, err error) {
+ if proxy == nil {
+ return nil, fmt.Errorf("proxy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(proxy)
+ if err != nil {
+ return nil, err
+ }
+
+ name := proxy.Name
+ if name == nil {
+ return nil, fmt.Errorf("proxy.Name must be provided to Apply")
+ }
+
+ result = &v1.Proxy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("proxies").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go
new file mode 100644
index 0000000000..da67a4f061
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/scheduler.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1 "github.com/openshift/api/config/v1"
+ configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SchedulersGetter has a method to return a SchedulerInterface.
+// A group's client should implement this interface.
+type SchedulersGetter interface {
+ Schedulers() SchedulerInterface
+}
+
+// SchedulerInterface has methods to work with Scheduler resources.
+type SchedulerInterface interface {
+ Create(ctx context.Context, scheduler *v1.Scheduler, opts metav1.CreateOptions) (*v1.Scheduler, error)
+ Update(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error)
+ UpdateStatus(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (*v1.Scheduler, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Scheduler, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.SchedulerList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Scheduler, err error)
+ Apply(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error)
+ ApplyStatus(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error)
+ SchedulerExpansion
+}
+
+// schedulers implements SchedulerInterface
+type schedulers struct {
+ client rest.Interface
+}
+
+// newSchedulers returns a Schedulers
+func newSchedulers(c *ConfigV1Client) *schedulers {
+ return &schedulers{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the scheduler, and returns the corresponding scheduler object, and an error if there is any.
+func (c *schedulers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Scheduler, err error) {
+ result = &v1.Scheduler{}
+ err = c.client.Get().
+ Resource("schedulers").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Schedulers that match those selectors.
+func (c *schedulers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SchedulerList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.SchedulerList{}
+ err = c.client.Get().
+ Resource("schedulers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested schedulers.
+func (c *schedulers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("schedulers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a scheduler and creates it. Returns the server's representation of the scheduler, and an error, if there is any.
+func (c *schedulers) Create(ctx context.Context, scheduler *v1.Scheduler, opts metav1.CreateOptions) (result *v1.Scheduler, err error) {
+ result = &v1.Scheduler{}
+ err = c.client.Post().
+ Resource("schedulers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(scheduler).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a scheduler and updates it. Returns the server's representation of the scheduler, and an error, if there is any.
+func (c *schedulers) Update(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (result *v1.Scheduler, err error) {
+ result = &v1.Scheduler{}
+ err = c.client.Put().
+ Resource("schedulers").
+ Name(scheduler.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(scheduler).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *schedulers) UpdateStatus(ctx context.Context, scheduler *v1.Scheduler, opts metav1.UpdateOptions) (result *v1.Scheduler, err error) {
+ result = &v1.Scheduler{}
+ err = c.client.Put().
+ Resource("schedulers").
+ Name(scheduler.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(scheduler).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the scheduler and deletes it. Returns an error if one occurs.
+func (c *schedulers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("schedulers").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *schedulers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("schedulers").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched scheduler.
+func (c *schedulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Scheduler, err error) {
+ result = &v1.Scheduler{}
+ err = c.client.Patch(pt).
+ Resource("schedulers").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied scheduler.
+func (c *schedulers) Apply(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) {
+ if scheduler == nil {
+ return nil, fmt.Errorf("scheduler provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scheduler)
+ if err != nil {
+ return nil, err
+ }
+ name := scheduler.Name
+ if name == nil {
+ return nil, fmt.Errorf("scheduler.Name must be provided to Apply")
+ }
+ result = &v1.Scheduler{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("schedulers").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *schedulers) ApplyStatus(ctx context.Context, scheduler *configv1.SchedulerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Scheduler, err error) {
+ if scheduler == nil {
+ return nil, fmt.Errorf("scheduler provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scheduler)
+ if err != nil {
+ return nil, err
+ }
+
+ name := scheduler.Name
+ if name == nil {
+ return nil, fmt.Errorf("scheduler.Name must be provided to Apply")
+ }
+
+ result = &v1.Scheduler{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("schedulers").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go
new file mode 100644
index 0000000000..8209afbb52
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/backup.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// BackupsGetter has a method to return a BackupInterface.
+// A group's client should implement this interface.
+type BackupsGetter interface {
+ Backups() BackupInterface
+}
+
+// BackupInterface has methods to work with Backup resources.
+type BackupInterface interface {
+ Create(ctx context.Context, backup *v1alpha1.Backup, opts v1.CreateOptions) (*v1alpha1.Backup, error)
+ Update(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (*v1alpha1.Backup, error)
+ UpdateStatus(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (*v1alpha1.Backup, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Backup, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.BackupList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Backup, err error)
+ Apply(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error)
+ ApplyStatus(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error)
+ BackupExpansion
+}
+
+// backups implements BackupInterface
+type backups struct {
+ client rest.Interface
+}
+
+// newBackups returns a Backups
+func newBackups(c *ConfigV1alpha1Client) *backups {
+ return &backups{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the backup, and returns the corresponding backup object, and an error if there is any.
+func (c *backups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Backup, err error) {
+ result = &v1alpha1.Backup{}
+ err = c.client.Get().
+ Resource("backups").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Backups that match those selectors.
+func (c *backups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.BackupList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.BackupList{}
+ err = c.client.Get().
+ Resource("backups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested backups.
+func (c *backups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("backups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a backup and creates it. Returns the server's representation of the backup, and an error, if there is any.
+func (c *backups) Create(ctx context.Context, backup *v1alpha1.Backup, opts v1.CreateOptions) (result *v1alpha1.Backup, err error) {
+ result = &v1alpha1.Backup{}
+ err = c.client.Post().
+ Resource("backups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a backup and updates it. Returns the server's representation of the backup, and an error, if there is any.
+func (c *backups) Update(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (result *v1alpha1.Backup, err error) {
+ result = &v1alpha1.Backup{}
+ err = c.client.Put().
+ Resource("backups").
+ Name(backup.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *backups) UpdateStatus(ctx context.Context, backup *v1alpha1.Backup, opts v1.UpdateOptions) (result *v1alpha1.Backup, err error) {
+ result = &v1alpha1.Backup{}
+ err = c.client.Put().
+ Resource("backups").
+ Name(backup.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(backup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the backup and deletes it. Returns an error if one occurs.
+func (c *backups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("backups").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *backups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("backups").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched backup.
+func (c *backups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Backup, err error) {
+ result = &v1alpha1.Backup{}
+ err = c.client.Patch(pt).
+ Resource("backups").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied backup.
+func (c *backups) Apply(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error) {
+ if backup == nil {
+ return nil, fmt.Errorf("backup provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(backup)
+ if err != nil {
+ return nil, err
+ }
+ name := backup.Name
+ if name == nil {
+ return nil, fmt.Errorf("backup.Name must be provided to Apply")
+ }
+ result = &v1alpha1.Backup{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("backups").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *backups) ApplyStatus(ctx context.Context, backup *configv1alpha1.BackupApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Backup, err error) {
+ if backup == nil {
+ return nil, fmt.Errorf("backup provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(backup)
+ if err != nil {
+ return nil, err
+ }
+
+ name := backup.Name
+ if name == nil {
+ return nil, fmt.Errorf("backup.Name must be provided to Apply")
+ }
+
+ result = &v1alpha1.Backup{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("backups").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go
new file mode 100644
index 0000000000..89ed7717e4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/clusterimagepolicy.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClusterImagePoliciesGetter has a method to return a ClusterImagePolicyInterface.
+// A group's client should implement this interface.
+type ClusterImagePoliciesGetter interface {
+ ClusterImagePolicies() ClusterImagePolicyInterface
+}
+
+// ClusterImagePolicyInterface has methods to work with ClusterImagePolicy resources.
+type ClusterImagePolicyInterface interface {
+ Create(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.CreateOptions) (*v1alpha1.ClusterImagePolicy, error)
+ Update(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ClusterImagePolicy, error)
+ UpdateStatus(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ClusterImagePolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterImagePolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterImagePolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterImagePolicy, err error)
+ Apply(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error)
+ ApplyStatus(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error)
+ ClusterImagePolicyExpansion
+}
+
+// clusterImagePolicies implements ClusterImagePolicyInterface
+type clusterImagePolicies struct {
+ client rest.Interface
+}
+
+// newClusterImagePolicies returns a ClusterImagePolicies
+func newClusterImagePolicies(c *ConfigV1alpha1Client) *clusterImagePolicies {
+ return &clusterImagePolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the clusterImagePolicy, and returns the corresponding clusterImagePolicy object, and an error if there is any.
+func (c *clusterImagePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Get().
+ Resource("clusterimagepolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClusterImagePolicies that match those selectors.
+func (c *clusterImagePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterImagePolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClusterImagePolicyList{}
+ err = c.client.Get().
+ Resource("clusterimagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clusterImagePolicies.
+func (c *clusterImagePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("clusterimagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clusterImagePolicy and creates it. Returns the server's representation of the clusterImagePolicy, and an error, if there is any.
+func (c *clusterImagePolicies) Create(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.CreateOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Post().
+ Resource("clusterimagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterImagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clusterImagePolicy and updates it. Returns the server's representation of the clusterImagePolicy, and an error, if there is any.
+func (c *clusterImagePolicies) Update(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Put().
+ Resource("clusterimagepolicies").
+ Name(clusterImagePolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterImagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clusterImagePolicies) UpdateStatus(ctx context.Context, clusterImagePolicy *v1alpha1.ClusterImagePolicy, opts v1.UpdateOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Put().
+ Resource("clusterimagepolicies").
+ Name(clusterImagePolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clusterImagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clusterImagePolicy and deletes it. Returns an error if one occurs.
+func (c *clusterImagePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("clusterimagepolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clusterImagePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("clusterimagepolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clusterImagePolicy.
+func (c *clusterImagePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterImagePolicy, err error) {
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Patch(pt).
+ Resource("clusterimagepolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied clusterImagePolicy.
+func (c *clusterImagePolicies) Apply(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ if clusterImagePolicy == nil {
+ return nil, fmt.Errorf("clusterImagePolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterImagePolicy)
+ if err != nil {
+ return nil, err
+ }
+ name := clusterImagePolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterImagePolicy.Name must be provided to Apply")
+ }
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusterimagepolicies").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *clusterImagePolicies) ApplyStatus(ctx context.Context, clusterImagePolicy *configv1alpha1.ClusterImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterImagePolicy, err error) {
+ if clusterImagePolicy == nil {
+ return nil, fmt.Errorf("clusterImagePolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(clusterImagePolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ name := clusterImagePolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("clusterImagePolicy.Name must be provided to Apply")
+ }
+
+ result = &v1alpha1.ClusterImagePolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("clusterimagepolicies").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go
new file mode 100644
index 0000000000..cfbbd848bb
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go
@@ -0,0 +1,106 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "net/http"
+
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type ConfigV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ BackupsGetter
+ ClusterImagePoliciesGetter
+ ImagePoliciesGetter
+ InsightsDataGathersGetter
+}
+
+// ConfigV1alpha1Client is used to interact with features provided by the config.openshift.io group.
+type ConfigV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ConfigV1alpha1Client) Backups() BackupInterface {
+ return newBackups(c)
+}
+
+func (c *ConfigV1alpha1Client) ClusterImagePolicies() ClusterImagePolicyInterface {
+ return newClusterImagePolicies(c)
+}
+
+func (c *ConfigV1alpha1Client) ImagePolicies(namespace string) ImagePolicyInterface {
+ return newImagePolicies(c, namespace)
+}
+
+func (c *ConfigV1alpha1Client) InsightsDataGathers() InsightsDataGatherInterface {
+ return newInsightsDataGathers(c)
+}
+
+// NewForConfig creates a new ConfigV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ConfigV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ConfigV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ConfigV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ConfigV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ConfigV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ConfigV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *ConfigV1alpha1Client {
+ return &ConfigV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ConfigV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go
new file mode 100644
index 0000000000..93a7ca4e0e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000000..3a69741b1d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go
@@ -0,0 +1,11 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type BackupExpansion interface{}
+
+type ClusterImagePolicyExpansion interface{}
+
+type ImagePolicyExpansion interface{}
+
+type InsightsDataGatherExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go
new file mode 100644
index 0000000000..a67969cf3b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/imagepolicy.go
@@ -0,0 +1,240 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ImagePoliciesGetter has a method to return a ImagePolicyInterface.
+// A group's client should implement this interface.
+type ImagePoliciesGetter interface {
+ ImagePolicies(namespace string) ImagePolicyInterface
+}
+
+// ImagePolicyInterface has methods to work with ImagePolicy resources.
+type ImagePolicyInterface interface {
+ Create(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.CreateOptions) (*v1alpha1.ImagePolicy, error)
+ Update(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ImagePolicy, error)
+ UpdateStatus(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (*v1alpha1.ImagePolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ImagePolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ImagePolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImagePolicy, err error)
+ Apply(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error)
+ ApplyStatus(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error)
+ ImagePolicyExpansion
+}
+
+// imagePolicies implements ImagePolicyInterface
+type imagePolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newImagePolicies returns a ImagePolicies
+func newImagePolicies(c *ConfigV1alpha1Client, namespace string) *imagePolicies {
+ return &imagePolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the imagePolicy, and returns the corresponding imagePolicy object, and an error if there is any.
+func (c *imagePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ImagePolicy, err error) {
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ImagePolicies that match those selectors.
+func (c *imagePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ImagePolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ImagePolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested imagePolicies.
+func (c *imagePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a imagePolicy and creates it. Returns the server's representation of the imagePolicy, and an error, if there is any.
+func (c *imagePolicies) Create(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.CreateOptions) (result *v1alpha1.ImagePolicy, err error) {
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a imagePolicy and updates it. Returns the server's representation of the imagePolicy, and an error, if there is any.
+func (c *imagePolicies) Update(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (result *v1alpha1.ImagePolicy, err error) {
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(imagePolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *imagePolicies) UpdateStatus(ctx context.Context, imagePolicy *v1alpha1.ImagePolicy, opts v1.UpdateOptions) (result *v1alpha1.ImagePolicy, err error) {
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(imagePolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(imagePolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the imagePolicy and deletes it. Returns an error if one occurs.
+func (c *imagePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *imagePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched imagePolicy.
+func (c *imagePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImagePolicy, err error) {
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied imagePolicy.
+func (c *imagePolicies) Apply(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error) {
+ if imagePolicy == nil {
+ return nil, fmt.Errorf("imagePolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imagePolicy)
+ if err != nil {
+ return nil, err
+ }
+ name := imagePolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("imagePolicy.Name must be provided to Apply")
+ }
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *imagePolicies) ApplyStatus(ctx context.Context, imagePolicy *configv1alpha1.ImagePolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ImagePolicy, err error) {
+ if imagePolicy == nil {
+ return nil, fmt.Errorf("imagePolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(imagePolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ name := imagePolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("imagePolicy.Name must be provided to Apply")
+ }
+
+ result = &v1alpha1.ImagePolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("imagepolicies").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go
new file mode 100644
index 0000000000..e3e66488a8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go
@@ -0,0 +1,227 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
+ scheme "github.com/openshift/client-go/config/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface.
+// A group's client should implement this interface.
+type InsightsDataGathersGetter interface {
+ InsightsDataGathers() InsightsDataGatherInterface
+}
+
+// InsightsDataGatherInterface has methods to work with InsightsDataGather resources.
+type InsightsDataGatherInterface interface {
+ Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (*v1alpha1.InsightsDataGather, error)
+ Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error)
+ UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.InsightsDataGather, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.InsightsDataGatherList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error)
+ Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error)
+ ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error)
+ InsightsDataGatherExpansion
+}
+
+// insightsDataGathers implements InsightsDataGatherInterface
+type insightsDataGathers struct {
+ client rest.Interface
+}
+
+// newInsightsDataGathers returns a InsightsDataGathers
+func newInsightsDataGathers(c *ConfigV1alpha1Client) *insightsDataGathers {
+ return &insightsDataGathers{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the insightsDataGather, and returns the corresponding insightsDataGather object, and an error if there is any.
+func (c *insightsDataGathers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Get().
+ Resource("insightsdatagathers").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of InsightsDataGathers that match those selectors.
+func (c *insightsDataGathers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InsightsDataGatherList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.InsightsDataGatherList{}
+ err = c.client.Get().
+ Resource("insightsdatagathers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested insightsDataGathers.
+func (c *insightsDataGathers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("insightsdatagathers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a insightsDataGather and creates it. Returns the server's representation of the insightsDataGather, and an error, if there is any.
+func (c *insightsDataGathers) Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Post().
+ Resource("insightsdatagathers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(insightsDataGather).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a insightsDataGather and updates it. Returns the server's representation of the insightsDataGather, and an error, if there is any.
+func (c *insightsDataGathers) Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Put().
+ Resource("insightsdatagathers").
+ Name(insightsDataGather.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(insightsDataGather).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *insightsDataGathers) UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Put().
+ Resource("insightsdatagathers").
+ Name(insightsDataGather.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(insightsDataGather).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the insightsDataGather and deletes it. Returns an error if one occurs.
+func (c *insightsDataGathers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("insightsdatagathers").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *insightsDataGathers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("insightsdatagathers").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched insightsDataGather.
+func (c *insightsDataGathers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error) {
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Patch(pt).
+ Resource("insightsdatagathers").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied insightsDataGather.
+func (c *insightsDataGathers) Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ if insightsDataGather == nil {
+ return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(insightsDataGather)
+ if err != nil {
+ return nil, err
+ }
+ name := insightsDataGather.Name
+ if name == nil {
+ return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply")
+ }
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("insightsdatagathers").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *insightsDataGathers) ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) {
+ if insightsDataGather == nil {
+ return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(insightsDataGather)
+ if err != nil {
+ return nil, err
+ }
+
+ name := insightsDataGather.Name
+ if name == nil {
+ return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply")
+ }
+
+ result = &v1alpha1.InsightsDataGather{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("insightsdatagathers").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go
new file mode 100644
index 0000000000..3e7e6e8d3b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go
@@ -0,0 +1,38 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package config
+
+import (
+ v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
+ v1alpha1 "github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go
new file mode 100644
index 0000000000..2fcff23129
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// APIServerInformer provides access to a shared informer and lister for
+// APIServers.
+type APIServerInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.APIServerLister
+}
+
+type aPIServerInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewAPIServerInformer constructs a new informer for APIServer type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredAPIServerInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredAPIServerInformer constructs a new informer for APIServer type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().APIServers().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().APIServers().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.APIServer{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *aPIServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredAPIServerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *aPIServerInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.APIServer{}, f.defaultInformer)
+}
+
+func (f *aPIServerInformer) Lister() v1.APIServerLister {
+ return v1.NewAPIServerLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go
new file mode 100644
index 0000000000..c2792cf8ff
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// AuthenticationInformer provides access to a shared informer and lister for
+// Authentications.
+type AuthenticationInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.AuthenticationLister
+}
+
+type authenticationInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewAuthenticationInformer constructs a new informer for Authentication type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredAuthenticationInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredAuthenticationInformer constructs a new informer for Authentication type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Authentications().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Authentications().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Authentication{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *authenticationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredAuthenticationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *authenticationInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Authentication{}, f.defaultInformer)
+}
+
+func (f *authenticationInformer) Lister() v1.AuthenticationLister {
+ return v1.NewAuthenticationLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go
new file mode 100644
index 0000000000..c944db0652
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// BuildInformer provides access to a shared informer and lister for
+// Builds.
+type BuildInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.BuildLister
+}
+
+type buildInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewBuildInformer constructs a new informer for Build type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredBuildInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredBuildInformer constructs a new informer for Build type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Builds().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Builds().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Build{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredBuildInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *buildInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Build{}, f.defaultInformer)
+}
+
+func (f *buildInformer) Lister() v1.BuildLister {
+ return v1.NewBuildLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go
new file mode 100644
index 0000000000..4c81309fb7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ClusterOperatorInformer provides access to a shared informer and lister for
+// ClusterOperators.
+type ClusterOperatorInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ClusterOperatorLister
+}
+
+type clusterOperatorInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewClusterOperatorInformer constructs a new informer for ClusterOperator type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredClusterOperatorInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredClusterOperatorInformer constructs a new informer for ClusterOperator type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ClusterOperators().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ClusterOperators().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.ClusterOperator{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *clusterOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredClusterOperatorInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *clusterOperatorInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.ClusterOperator{}, f.defaultInformer)
+}
+
+func (f *clusterOperatorInformer) Lister() v1.ClusterOperatorLister {
+ return v1.NewClusterOperatorLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go
new file mode 100644
index 0000000000..8015d6eed7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ClusterVersionInformer provides access to a shared informer and lister for
+// ClusterVersions.
+type ClusterVersionInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ClusterVersionLister
+}
+
+type clusterVersionInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewClusterVersionInformer constructs a new informer for ClusterVersion type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredClusterVersionInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredClusterVersionInformer constructs a new informer for ClusterVersion type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ClusterVersions().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ClusterVersions().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.ClusterVersion{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *clusterVersionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredClusterVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *clusterVersionInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.ClusterVersion{}, f.defaultInformer)
+}
+
+func (f *clusterVersionInformer) Lister() v1.ClusterVersionLister {
+ return v1.NewClusterVersionLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go
new file mode 100644
index 0000000000..7d23130a44
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ConsoleInformer provides access to a shared informer and lister for
+// Consoles.
+type ConsoleInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ConsoleLister
+}
+
+type consoleInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewConsoleInformer constructs a new informer for Console type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredConsoleInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredConsoleInformer constructs a new informer for Console type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Consoles().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Consoles().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Console{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *consoleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredConsoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *consoleInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Console{}, f.defaultInformer)
+}
+
+func (f *consoleInformer) Lister() v1.ConsoleLister {
+ return v1.NewConsoleLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go
new file mode 100644
index 0000000000..ddadf98cb8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// DNSInformer provides access to a shared informer and lister for
+// DNSes.
+type DNSInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.DNSLister
+}
+
+type dNSInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewDNSInformer constructs a new informer for DNS type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredDNSInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredDNSInformer constructs a new informer for DNS type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().DNSes().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().DNSes().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.DNS{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *dNSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredDNSInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *dNSInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.DNS{}, f.defaultInformer)
+}
+
+func (f *dNSInformer) Lister() v1.DNSLister {
+ return v1.NewDNSLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go
new file mode 100644
index 0000000000..84cec90afa
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// FeatureGateInformer provides access to a shared informer and lister for
+// FeatureGates.
+type FeatureGateInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.FeatureGateLister
+}
+
+type featureGateInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewFeatureGateInformer constructs a new informer for FeatureGate type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredFeatureGateInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredFeatureGateInformer constructs a new informer for FeatureGate type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().FeatureGates().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().FeatureGates().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.FeatureGate{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *featureGateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredFeatureGateInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *featureGateInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.FeatureGate{}, f.defaultInformer)
+}
+
+func (f *featureGateInformer) Lister() v1.FeatureGateLister {
+ return v1.NewFeatureGateLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go
new file mode 100644
index 0000000000..e7a3ecc210
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ImageInformer provides access to a shared informer and lister for
+// Images.
+type ImageInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ImageLister
+}
+
+type imageInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewImageInformer constructs a new informer for Image type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredImageInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredImageInformer constructs a new informer for Image type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Images().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Images().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Image{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *imageInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Image{}, f.defaultInformer)
+}
+
+func (f *imageInformer) Lister() v1.ImageLister {
+ return v1.NewImageLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go
new file mode 100644
index 0000000000..c50ea7b1b2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagecontentpolicy.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ImageContentPolicyInformer provides access to a shared informer and lister for
+// ImageContentPolicies.
+type ImageContentPolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ImageContentPolicyLister
+}
+
+type imageContentPolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewImageContentPolicyInformer constructs a new informer for ImageContentPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewImageContentPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredImageContentPolicyInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredImageContentPolicyInformer constructs a new informer for ImageContentPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredImageContentPolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageContentPolicies().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageContentPolicies().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.ImageContentPolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *imageContentPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredImageContentPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *imageContentPolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.ImageContentPolicy{}, f.defaultInformer)
+}
+
+func (f *imageContentPolicyInformer) Lister() v1.ImageContentPolicyLister {
+ return v1.NewImageContentPolicyLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go
new file mode 100644
index 0000000000..8953cfd890
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagedigestmirrorset.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ImageDigestMirrorSetInformer provides access to a shared informer and lister for
+// ImageDigestMirrorSets.
+type ImageDigestMirrorSetInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ImageDigestMirrorSetLister
+}
+
+type imageDigestMirrorSetInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewImageDigestMirrorSetInformer constructs a new informer for ImageDigestMirrorSet type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewImageDigestMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredImageDigestMirrorSetInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredImageDigestMirrorSetInformer constructs a new informer for ImageDigestMirrorSet type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredImageDigestMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageDigestMirrorSets().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageDigestMirrorSets().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.ImageDigestMirrorSet{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *imageDigestMirrorSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredImageDigestMirrorSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *imageDigestMirrorSetInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.ImageDigestMirrorSet{}, f.defaultInformer)
+}
+
+func (f *imageDigestMirrorSetInformer) Lister() v1.ImageDigestMirrorSetLister {
+ return v1.NewImageDigestMirrorSetLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go
new file mode 100644
index 0000000000..a0951a190f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/imagetagmirrorset.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ImageTagMirrorSetInformer provides access to a shared informer and lister for
+// ImageTagMirrorSets.
+type ImageTagMirrorSetInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ImageTagMirrorSetLister
+}
+
+type imageTagMirrorSetInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewImageTagMirrorSetInformer constructs a new informer for ImageTagMirrorSet type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewImageTagMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredImageTagMirrorSetInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredImageTagMirrorSetInformer constructs a new informer for ImageTagMirrorSet type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredImageTagMirrorSetInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageTagMirrorSets().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().ImageTagMirrorSets().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.ImageTagMirrorSet{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *imageTagMirrorSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredImageTagMirrorSetInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *imageTagMirrorSetInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.ImageTagMirrorSet{}, f.defaultInformer)
+}
+
+func (f *imageTagMirrorSetInformer) Lister() v1.ImageTagMirrorSetLister {
+ return v1.NewImageTagMirrorSetLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go
new file mode 100644
index 0000000000..150ee6fe85
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// InfrastructureInformer provides access to a shared informer and lister for
+// Infrastructures.
+type InfrastructureInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.InfrastructureLister
+}
+
+type infrastructureInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewInfrastructureInformer constructs a new informer for Infrastructure type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredInfrastructureInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredInfrastructureInformer constructs a new informer for Infrastructure type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Infrastructures().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Infrastructures().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Infrastructure{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *infrastructureInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredInfrastructureInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *infrastructureInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Infrastructure{}, f.defaultInformer)
+}
+
+func (f *infrastructureInformer) Lister() v1.InfrastructureLister {
+ return v1.NewInfrastructureLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go
new file mode 100644
index 0000000000..4452b1022f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// IngressInformer provides access to a shared informer and lister for
+// Ingresses.
+type IngressInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.IngressLister
+}
+
+type ingressInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewIngressInformer constructs a new informer for Ingress type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredIngressInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredIngressInformer constructs a new informer for Ingress type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Ingresses().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Ingresses().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Ingress{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *ingressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredIngressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *ingressInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Ingress{}, f.defaultInformer)
+}
+
+func (f *ingressInformer) Lister() v1.IngressLister {
+ return v1.NewIngressLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go
new file mode 100644
index 0000000000..f49b1d2287
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go
@@ -0,0 +1,169 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // APIServers returns a APIServerInformer.
+ APIServers() APIServerInformer
+ // Authentications returns a AuthenticationInformer.
+ Authentications() AuthenticationInformer
+ // Builds returns a BuildInformer.
+ Builds() BuildInformer
+ // ClusterOperators returns a ClusterOperatorInformer.
+ ClusterOperators() ClusterOperatorInformer
+ // ClusterVersions returns a ClusterVersionInformer.
+ ClusterVersions() ClusterVersionInformer
+ // Consoles returns a ConsoleInformer.
+ Consoles() ConsoleInformer
+ // DNSes returns a DNSInformer.
+ DNSes() DNSInformer
+ // FeatureGates returns a FeatureGateInformer.
+ FeatureGates() FeatureGateInformer
+ // Images returns a ImageInformer.
+ Images() ImageInformer
+ // ImageContentPolicies returns a ImageContentPolicyInformer.
+ ImageContentPolicies() ImageContentPolicyInformer
+ // ImageDigestMirrorSets returns a ImageDigestMirrorSetInformer.
+ ImageDigestMirrorSets() ImageDigestMirrorSetInformer
+ // ImageTagMirrorSets returns a ImageTagMirrorSetInformer.
+ ImageTagMirrorSets() ImageTagMirrorSetInformer
+ // Infrastructures returns a InfrastructureInformer.
+ Infrastructures() InfrastructureInformer
+ // Ingresses returns a IngressInformer.
+ Ingresses() IngressInformer
+ // Networks returns a NetworkInformer.
+ Networks() NetworkInformer
+ // Nodes returns a NodeInformer.
+ Nodes() NodeInformer
+ // OAuths returns a OAuthInformer.
+ OAuths() OAuthInformer
+ // OperatorHubs returns a OperatorHubInformer.
+ OperatorHubs() OperatorHubInformer
+ // Projects returns a ProjectInformer.
+ Projects() ProjectInformer
+ // Proxies returns a ProxyInformer.
+ Proxies() ProxyInformer
+ // Schedulers returns a SchedulerInformer.
+ Schedulers() SchedulerInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// APIServers returns a APIServerInformer.
+func (v *version) APIServers() APIServerInformer {
+ return &aPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Authentications returns a AuthenticationInformer.
+func (v *version) Authentications() AuthenticationInformer {
+ return &authenticationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Builds returns a BuildInformer.
+func (v *version) Builds() BuildInformer {
+ return &buildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ClusterOperators returns a ClusterOperatorInformer.
+func (v *version) ClusterOperators() ClusterOperatorInformer {
+ return &clusterOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ClusterVersions returns a ClusterVersionInformer.
+func (v *version) ClusterVersions() ClusterVersionInformer {
+ return &clusterVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Consoles returns a ConsoleInformer.
+func (v *version) Consoles() ConsoleInformer {
+ return &consoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// DNSes returns a DNSInformer.
+func (v *version) DNSes() DNSInformer {
+ return &dNSInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// FeatureGates returns a FeatureGateInformer.
+func (v *version) FeatureGates() FeatureGateInformer {
+ return &featureGateInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Images returns a ImageInformer.
+func (v *version) Images() ImageInformer {
+ return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ImageContentPolicies returns a ImageContentPolicyInformer.
+func (v *version) ImageContentPolicies() ImageContentPolicyInformer {
+ return &imageContentPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ImageDigestMirrorSets returns a ImageDigestMirrorSetInformer.
+func (v *version) ImageDigestMirrorSets() ImageDigestMirrorSetInformer {
+ return &imageDigestMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ImageTagMirrorSets returns a ImageTagMirrorSetInformer.
+func (v *version) ImageTagMirrorSets() ImageTagMirrorSetInformer {
+ return &imageTagMirrorSetInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Infrastructures returns a InfrastructureInformer.
+func (v *version) Infrastructures() InfrastructureInformer {
+ return &infrastructureInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Ingresses returns a IngressInformer.
+func (v *version) Ingresses() IngressInformer {
+ return &ingressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Networks returns a NetworkInformer.
+func (v *version) Networks() NetworkInformer {
+ return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Nodes returns a NodeInformer.
+func (v *version) Nodes() NodeInformer {
+ return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// OAuths returns a OAuthInformer.
+func (v *version) OAuths() OAuthInformer {
+ return &oAuthInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// OperatorHubs returns a OperatorHubInformer.
+func (v *version) OperatorHubs() OperatorHubInformer {
+ return &operatorHubInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Projects returns a ProjectInformer.
+func (v *version) Projects() ProjectInformer {
+ return &projectInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Proxies returns a ProxyInformer.
+func (v *version) Proxies() ProxyInformer {
+ return &proxyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// Schedulers returns a SchedulerInformer.
+func (v *version) Schedulers() SchedulerInformer {
+ return &schedulerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go
new file mode 100644
index 0000000000..d05980759a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NetworkInformer provides access to a shared informer and lister for
+// Networks.
+type NetworkInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.NetworkLister
+}
+
+type networkInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewNetworkInformer constructs a new informer for Network type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredNetworkInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredNetworkInformer constructs a new informer for Network type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Networks().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Networks().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Network{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *networkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *networkInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Network{}, f.defaultInformer)
+}
+
+func (f *networkInformer) Lister() v1.NetworkLister {
+ return v1.NewNetworkLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go
new file mode 100644
index 0000000000..6a9f806dff
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/node.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NodeInformer provides access to a shared informer and lister for
+// Nodes.
+type NodeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.NodeLister
+}
+
+type nodeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewNodeInformer constructs a new informer for Node type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredNodeInformer constructs a new informer for Node type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredNodeInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Nodes().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Nodes().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Node{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *nodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *nodeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Node{}, f.defaultInformer)
+}
+
+func (f *nodeInformer) Lister() v1.NodeLister {
+ return v1.NewNodeLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go
new file mode 100644
index 0000000000..31b37b7933
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// OAuthInformer provides access to a shared informer and lister for
+// OAuths.
+type OAuthInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.OAuthLister
+}
+
+type oAuthInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewOAuthInformer constructs a new informer for OAuth type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredOAuthInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredOAuthInformer constructs a new informer for OAuth type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().OAuths().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().OAuths().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.OAuth{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *oAuthInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredOAuthInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *oAuthInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.OAuth{}, f.defaultInformer)
+}
+
+func (f *oAuthInformer) Lister() v1.OAuthLister {
+ return v1.NewOAuthLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go
new file mode 100644
index 0000000000..a2c8757fce
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/operatorhub.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// OperatorHubInformer provides access to a shared informer and lister for
+// OperatorHubs.
+type OperatorHubInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.OperatorHubLister
+}
+
+type operatorHubInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewOperatorHubInformer constructs a new informer for OperatorHub type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewOperatorHubInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredOperatorHubInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredOperatorHubInformer constructs a new informer for OperatorHub type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredOperatorHubInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().OperatorHubs().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().OperatorHubs().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.OperatorHub{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *operatorHubInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredOperatorHubInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *operatorHubInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.OperatorHub{}, f.defaultInformer)
+}
+
+func (f *operatorHubInformer) Lister() v1.OperatorHubLister {
+ return v1.NewOperatorHubLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go
new file mode 100644
index 0000000000..c9f5af1ece
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ProjectInformer provides access to a shared informer and lister for
+// Projects.
+type ProjectInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ProjectLister
+}
+
+type projectInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewProjectInformer constructs a new informer for Project type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredProjectInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredProjectInformer constructs a new informer for Project type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Projects().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Projects().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Project{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *projectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredProjectInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *projectInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Project{}, f.defaultInformer)
+}
+
+func (f *projectInformer) Lister() v1.ProjectLister {
+ return v1.NewProjectLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go
new file mode 100644
index 0000000000..cfbcd029e4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ProxyInformer provides access to a shared informer and lister for
+// Proxies.
+type ProxyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ProxyLister
+}
+
+type proxyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewProxyInformer constructs a new informer for Proxy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredProxyInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredProxyInformer constructs a new informer for Proxy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Proxies().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Proxies().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Proxy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *proxyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredProxyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *proxyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Proxy{}, f.defaultInformer)
+}
+
+func (f *proxyInformer) Lister() v1.ProxyLister {
+ return v1.NewProxyLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go
new file mode 100644
index 0000000000..104cdd76ca
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ configv1 "github.com/openshift/api/config/v1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "github.com/openshift/client-go/config/listers/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// SchedulerInformer provides access to a shared informer and lister for
+// Schedulers.
+type SchedulerInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.SchedulerLister
+}
+
+type schedulerInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewSchedulerInformer constructs a new informer for Scheduler type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredSchedulerInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredSchedulerInformer constructs a new informer for Scheduler type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Schedulers().List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1().Schedulers().Watch(context.TODO(), options)
+ },
+ },
+ &configv1.Scheduler{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *schedulerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredSchedulerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *schedulerInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1.Scheduler{}, f.defaultInformer)
+}
+
+func (f *schedulerInformer) Lister() v1.SchedulerLister {
+ return v1.NewSchedulerLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go
new file mode 100644
index 0000000000..ae3013d42a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/backup.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// BackupInformer provides access to a shared informer and lister for
+// Backups.
+type BackupInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.BackupLister
+}
+
+type backupInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewBackupInformer constructs a new informer for Backup type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewBackupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredBackupInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredBackupInformer constructs a new informer for Backup type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredBackupInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().Backups().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().Backups().Watch(context.TODO(), options)
+ },
+ },
+ &configv1alpha1.Backup{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *backupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredBackupInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *backupInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1alpha1.Backup{}, f.defaultInformer)
+}
+
+func (f *backupInformer) Lister() v1alpha1.BackupLister {
+ return v1alpha1.NewBackupLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go
new file mode 100644
index 0000000000..2e71741a74
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/clusterimagepolicy.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ClusterImagePolicyInformer provides access to a shared informer and lister for
+// ClusterImagePolicies.
+type ClusterImagePolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.ClusterImagePolicyLister
+}
+
+type clusterImagePolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredClusterImagePolicyInformer constructs a new informer for ClusterImagePolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredClusterImagePolicyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().ClusterImagePolicies().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().ClusterImagePolicies().Watch(context.TODO(), options)
+ },
+ },
+ &configv1alpha1.ClusterImagePolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *clusterImagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredClusterImagePolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *clusterImagePolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1alpha1.ClusterImagePolicy{}, f.defaultInformer)
+}
+
+func (f *clusterImagePolicyInformer) Lister() v1alpha1.ClusterImagePolicyLister {
+ return v1alpha1.NewClusterImagePolicyLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go
new file mode 100644
index 0000000000..ba72caedf6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/imagepolicy.go
@@ -0,0 +1,74 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ImagePolicyInformer provides access to a shared informer and lister for
+// ImagePolicies.
+type ImagePolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.ImagePolicyLister
+}
+
+type imagePolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewImagePolicyInformer constructs a new informer for ImagePolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredImagePolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredImagePolicyInformer constructs a new informer for ImagePolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredImagePolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().ImagePolicies(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().ImagePolicies(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &configv1alpha1.ImagePolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *imagePolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredImagePolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *imagePolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1alpha1.ImagePolicy{}, f.defaultInformer)
+}
+
+func (f *imagePolicyInformer) Lister() v1alpha1.ImagePolicyLister {
+ return v1alpha1.NewImagePolicyLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go
new file mode 100644
index 0000000000..22a41d3630
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go
@@ -0,0 +1,73 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ configv1alpha1 "github.com/openshift/api/config/v1alpha1"
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// InsightsDataGatherInformer provides access to a shared informer and lister for
+// InsightsDataGathers.
+type InsightsDataGatherInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.InsightsDataGatherLister
+}
+
+type insightsDataGatherInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().InsightsDataGathers().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.TODO(), options)
+ },
+ },
+ &configv1alpha1.InsightsDataGather{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&configv1alpha1.InsightsDataGather{}, f.defaultInformer)
+}
+
+func (f *insightsDataGatherInformer) Lister() v1alpha1.InsightsDataGatherLister {
+ return v1alpha1.NewInsightsDataGatherLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go
new file mode 100644
index 0000000000..69b5569fa0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go
@@ -0,0 +1,50 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // Backups returns a BackupInformer.
+ Backups() BackupInformer
+ // ClusterImagePolicies returns a ClusterImagePolicyInformer.
+ ClusterImagePolicies() ClusterImagePolicyInformer
+ // ImagePolicies returns a ImagePolicyInformer.
+ ImagePolicies() ImagePolicyInformer
+ // InsightsDataGathers returns a InsightsDataGatherInformer.
+ InsightsDataGathers() InsightsDataGatherInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Backups returns a BackupInformer.
+func (v *version) Backups() BackupInformer {
+ return &backupInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ClusterImagePolicies returns a ClusterImagePolicyInformer.
+func (v *version) ClusterImagePolicies() ClusterImagePolicyInformer {
+ return &clusterImagePolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ImagePolicies returns a ImagePolicyInformer.
+func (v *version) ImagePolicies() ImagePolicyInformer {
+ return &imagePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// InsightsDataGathers returns a InsightsDataGatherInformer.
+func (v *version) InsightsDataGathers() InsightsDataGatherInformer {
+ return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go
new file mode 100644
index 0000000000..0607e96c87
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go
@@ -0,0 +1,245 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ reflect "reflect"
+ sync "sync"
+ time "time"
+
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ config "github.com/openshift/client-go/config/informers/externalversions/config"
+ internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+ client versioned.Interface
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ lock sync.Mutex
+ defaultResync time.Duration
+ customResync map[reflect.Type]time.Duration
+ transform cache.TransformFunc
+
+ informers map[reflect.Type]cache.SharedIndexInformer
+ // startedInformers is used for tracking which informers have been started.
+ // This allows Start() to be called multiple times safely.
+ startedInformers map[reflect.Type]bool
+ // wg tracks how many goroutines were started.
+ wg sync.WaitGroup
+ // shuttingDown is true when Shutdown has been called. It may still be running
+ // because it needs to wait for goroutines.
+ shuttingDown bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ for k, v := range resyncConfig {
+ factory.customResync[reflect.TypeOf(k)] = v
+ }
+ return factory
+ }
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.tweakListOptions = tweakListOptions
+ return factory
+ }
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.namespace = namespace
+ return factory
+ }
+}
+
+// WithTransform sets a transform on all informers.
+func WithTransform(transform cache.TransformFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.transform = transform
+ return factory
+ }
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.shuttingDown {
+ return
+ }
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ f.wg.Add(1)
+ // We need a new variable in each loop iteration,
+ // otherwise the goroutine would use the loop variable
+ // and that keeps changing.
+ informer := informer
+ go func() {
+ defer f.wg.Done()
+ informer.Run(stopCh)
+ }()
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+ f.lock.Lock()
+ f.shuttingDown = true
+ f.lock.Unlock()
+
+ // Will return immediately if there is nothing to wait for.
+ f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+// ctx, cancel := context.Background()
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+// defer factory.WaitForStop() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+// // Creating informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers. They are handled in goroutines
+ // which run until the stop channel gets closed.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ // ForResource gives generic access to a shared informer of the matching type.
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // InformerFor returns the SharedIndexInformer for obj using an internal
+ // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+ Config() config.Interface
+}
+
+func (f *sharedInformerFactory) Config() config.Interface {
+ return config.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go
new file mode 100644
index 0000000000..20b3b4e606
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go
@@ -0,0 +1,97 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ "fmt"
+
+ v1 "github.com/openshift/api/config/v1"
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
+// sharedInformers based on type
+type GenericInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+ informer cache.SharedIndexInformer
+ resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+ return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+ return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+ switch resource {
+ // Group=config.openshift.io, Version=v1
+ case v1.SchemeGroupVersion.WithResource("apiservers"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().APIServers().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("authentications"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Authentications().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("builds"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Builds().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("clusteroperators"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterOperators().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("clusterversions"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterVersions().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("consoles"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Consoles().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("dnses"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().DNSes().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("featuregates"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().FeatureGates().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("images"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Images().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("imagecontentpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageContentPolicies().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("imagedigestmirrorsets"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageDigestMirrorSets().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("imagetagmirrorsets"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ImageTagMirrorSets().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("infrastructures"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Infrastructures().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("ingresses"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Ingresses().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("networks"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Networks().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("nodes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Nodes().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("oauths"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().OAuths().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("operatorhubs"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().OperatorHubs().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("projects"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Projects().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("proxies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Proxies().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("schedulers"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Schedulers().Informer()}, nil
+
+ // Group=config.openshift.io, Version=v1alpha1
+ case v1alpha1.SchemeGroupVersion.WithResource("backups"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().Backups().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("clusterimagepolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ClusterImagePolicies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("imagepolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().ImagePolicies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().InsightsDataGathers().Informer()}, nil
+
+ }
+
+ return nil, fmt.Errorf("no informer found for %v", resource)
+}
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 0000000000..720235c485
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+ time "time"
+
+ versioned "github.com/openshift/client-go/config/clientset/versioned"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+ Start(stopCh <-chan struct{})
+ InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go
new file mode 100644
index 0000000000..247e40017b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/apiserver.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// APIServerLister helps list APIServers.
+// All objects returned here must be treated as read-only.
+type APIServerLister interface {
+ // List lists all APIServers in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.APIServer, err error)
+ // Get retrieves the APIServer from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.APIServer, error)
+ APIServerListerExpansion
+}
+
+// aPIServerLister implements the APIServerLister interface.
+type aPIServerLister struct {
+ indexer cache.Indexer
+}
+
+// NewAPIServerLister returns a new APIServerLister.
+func NewAPIServerLister(indexer cache.Indexer) APIServerLister {
+ return &aPIServerLister{indexer: indexer}
+}
+
+// List lists all APIServers in the indexer.
+func (s *aPIServerLister) List(selector labels.Selector) (ret []*v1.APIServer, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.APIServer))
+ })
+ return ret, err
+}
+
+// Get retrieves the APIServer from the index for a given name.
+func (s *aPIServerLister) Get(name string) (*v1.APIServer, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("apiserver"), name)
+ }
+ return obj.(*v1.APIServer), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go
new file mode 100644
index 0000000000..99cc824851
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/authentication.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// AuthenticationLister helps list Authentications.
+// All objects returned here must be treated as read-only.
+type AuthenticationLister interface {
+ // List lists all Authentications in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Authentication, err error)
+ // Get retrieves the Authentication from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Authentication, error)
+ AuthenticationListerExpansion
+}
+
+// authenticationLister implements the AuthenticationLister interface.
+type authenticationLister struct {
+ indexer cache.Indexer
+}
+
+// NewAuthenticationLister returns a new AuthenticationLister.
+func NewAuthenticationLister(indexer cache.Indexer) AuthenticationLister {
+ return &authenticationLister{indexer: indexer}
+}
+
+// List lists all Authentications in the indexer.
+func (s *authenticationLister) List(selector labels.Selector) (ret []*v1.Authentication, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Authentication))
+ })
+ return ret, err
+}
+
+// Get retrieves the Authentication from the index for a given name.
+func (s *authenticationLister) Get(name string) (*v1.Authentication, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("authentication"), name)
+ }
+ return obj.(*v1.Authentication), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go
new file mode 100644
index 0000000000..77a1a36ff8
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/build.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// BuildLister helps list Builds.
+// All objects returned here must be treated as read-only.
+type BuildLister interface {
+ // List lists all Builds in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Build, err error)
+ // Get retrieves the Build from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Build, error)
+ BuildListerExpansion
+}
+
+// buildLister implements the BuildLister interface.
+type buildLister struct {
+ indexer cache.Indexer
+}
+
+// NewBuildLister returns a new BuildLister.
+func NewBuildLister(indexer cache.Indexer) BuildLister {
+ return &buildLister{indexer: indexer}
+}
+
+// List lists all Builds in the indexer.
+func (s *buildLister) List(selector labels.Selector) (ret []*v1.Build, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Build))
+ })
+ return ret, err
+}
+
+// Get retrieves the Build from the index for a given name.
+func (s *buildLister) Get(name string) (*v1.Build, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("build"), name)
+ }
+ return obj.(*v1.Build), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go
new file mode 100644
index 0000000000..e4c95773a0
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusteroperator.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ClusterOperatorLister helps list ClusterOperators.
+// All objects returned here must be treated as read-only.
+type ClusterOperatorLister interface {
+ // List lists all ClusterOperators in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ClusterOperator, err error)
+ // Get retrieves the ClusterOperator from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ClusterOperator, error)
+ ClusterOperatorListerExpansion
+}
+
+// clusterOperatorLister implements the ClusterOperatorLister interface.
+type clusterOperatorLister struct {
+ indexer cache.Indexer
+}
+
+// NewClusterOperatorLister returns a new ClusterOperatorLister.
+func NewClusterOperatorLister(indexer cache.Indexer) ClusterOperatorLister {
+ return &clusterOperatorLister{indexer: indexer}
+}
+
+// List lists all ClusterOperators in the indexer.
+func (s *clusterOperatorLister) List(selector labels.Selector) (ret []*v1.ClusterOperator, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ClusterOperator))
+ })
+ return ret, err
+}
+
+// Get retrieves the ClusterOperator from the index for a given name.
+func (s *clusterOperatorLister) Get(name string) (*v1.ClusterOperator, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("clusteroperator"), name)
+ }
+ return obj.(*v1.ClusterOperator), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go
new file mode 100644
index 0000000000..ab309a1b20
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clusterversion.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ClusterVersionLister helps list ClusterVersions.
+// All objects returned here must be treated as read-only.
+type ClusterVersionLister interface {
+ // List lists all ClusterVersions in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ClusterVersion, err error)
+ // Get retrieves the ClusterVersion from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ClusterVersion, error)
+ ClusterVersionListerExpansion
+}
+
+// clusterVersionLister implements the ClusterVersionLister interface.
+type clusterVersionLister struct {
+ indexer cache.Indexer
+}
+
+// NewClusterVersionLister returns a new ClusterVersionLister.
+func NewClusterVersionLister(indexer cache.Indexer) ClusterVersionLister {
+ return &clusterVersionLister{indexer: indexer}
+}
+
+// List lists all ClusterVersions in the indexer.
+func (s *clusterVersionLister) List(selector labels.Selector) (ret []*v1.ClusterVersion, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ClusterVersion))
+ })
+ return ret, err
+}
+
+// Get retrieves the ClusterVersion from the index for a given name.
+func (s *clusterVersionLister) Get(name string) (*v1.ClusterVersion, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("clusterversion"), name)
+ }
+ return obj.(*v1.ClusterVersion), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go
new file mode 100644
index 0000000000..daaf7aa92f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/console.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ConsoleLister helps list Consoles.
+// All objects returned here must be treated as read-only.
+type ConsoleLister interface {
+ // List lists all Consoles in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Console, err error)
+ // Get retrieves the Console from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Console, error)
+ ConsoleListerExpansion
+}
+
+// consoleLister implements the ConsoleLister interface.
+type consoleLister struct {
+ indexer cache.Indexer
+}
+
+// NewConsoleLister returns a new ConsoleLister.
+func NewConsoleLister(indexer cache.Indexer) ConsoleLister {
+ return &consoleLister{indexer: indexer}
+}
+
+// List lists all Consoles in the indexer.
+func (s *consoleLister) List(selector labels.Selector) (ret []*v1.Console, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Console))
+ })
+ return ret, err
+}
+
+// Get retrieves the Console from the index for a given name.
+func (s *consoleLister) Get(name string) (*v1.Console, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("console"), name)
+ }
+ return obj.(*v1.Console), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go
new file mode 100644
index 0000000000..89441b3a9d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/dns.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// DNSLister helps list DNSes.
+// All objects returned here must be treated as read-only.
+type DNSLister interface {
+ // List lists all DNSes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.DNS, err error)
+ // Get retrieves the DNS from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.DNS, error)
+ DNSListerExpansion
+}
+
+// dNSLister implements the DNSLister interface.
+type dNSLister struct {
+ indexer cache.Indexer
+}
+
+// NewDNSLister returns a new DNSLister.
+func NewDNSLister(indexer cache.Indexer) DNSLister {
+ return &dNSLister{indexer: indexer}
+}
+
+// List lists all DNSes in the indexer.
+func (s *dNSLister) List(selector labels.Selector) (ret []*v1.DNS, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.DNS))
+ })
+ return ret, err
+}
+
+// Get retrieves the DNS from the index for a given name.
+func (s *dNSLister) Get(name string) (*v1.DNS, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("dns"), name)
+ }
+ return obj.(*v1.DNS), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go
new file mode 100644
index 0000000000..b5d6fc088b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go
@@ -0,0 +1,87 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// APIServerListerExpansion allows custom methods to be added to
+// APIServerLister.
+type APIServerListerExpansion interface{}
+
+// AuthenticationListerExpansion allows custom methods to be added to
+// AuthenticationLister.
+type AuthenticationListerExpansion interface{}
+
+// BuildListerExpansion allows custom methods to be added to
+// BuildLister.
+type BuildListerExpansion interface{}
+
+// ClusterOperatorListerExpansion allows custom methods to be added to
+// ClusterOperatorLister.
+type ClusterOperatorListerExpansion interface{}
+
+// ClusterVersionListerExpansion allows custom methods to be added to
+// ClusterVersionLister.
+type ClusterVersionListerExpansion interface{}
+
+// ConsoleListerExpansion allows custom methods to be added to
+// ConsoleLister.
+type ConsoleListerExpansion interface{}
+
+// DNSListerExpansion allows custom methods to be added to
+// DNSLister.
+type DNSListerExpansion interface{}
+
+// FeatureGateListerExpansion allows custom methods to be added to
+// FeatureGateLister.
+type FeatureGateListerExpansion interface{}
+
+// ImageListerExpansion allows custom methods to be added to
+// ImageLister.
+type ImageListerExpansion interface{}
+
+// ImageContentPolicyListerExpansion allows custom methods to be added to
+// ImageContentPolicyLister.
+type ImageContentPolicyListerExpansion interface{}
+
+// ImageDigestMirrorSetListerExpansion allows custom methods to be added to
+// ImageDigestMirrorSetLister.
+type ImageDigestMirrorSetListerExpansion interface{}
+
+// ImageTagMirrorSetListerExpansion allows custom methods to be added to
+// ImageTagMirrorSetLister.
+type ImageTagMirrorSetListerExpansion interface{}
+
+// InfrastructureListerExpansion allows custom methods to be added to
+// InfrastructureLister.
+type InfrastructureListerExpansion interface{}
+
+// IngressListerExpansion allows custom methods to be added to
+// IngressLister.
+type IngressListerExpansion interface{}
+
+// NetworkListerExpansion allows custom methods to be added to
+// NetworkLister.
+type NetworkListerExpansion interface{}
+
+// NodeListerExpansion allows custom methods to be added to
+// NodeLister.
+type NodeListerExpansion interface{}
+
+// OAuthListerExpansion allows custom methods to be added to
+// OAuthLister.
+type OAuthListerExpansion interface{}
+
+// OperatorHubListerExpansion allows custom methods to be added to
+// OperatorHubLister.
+type OperatorHubListerExpansion interface{}
+
+// ProjectListerExpansion allows custom methods to be added to
+// ProjectLister.
+type ProjectListerExpansion interface{}
+
+// ProxyListerExpansion allows custom methods to be added to
+// ProxyLister.
+type ProxyListerExpansion interface{}
+
+// SchedulerListerExpansion allows custom methods to be added to
+// SchedulerLister.
+type SchedulerListerExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go
new file mode 100644
index 0000000000..4c796e80ff
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// FeatureGateLister helps list FeatureGates.
+// All objects returned here must be treated as read-only.
+type FeatureGateLister interface {
+ // List lists all FeatureGates in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.FeatureGate, err error)
+ // Get retrieves the FeatureGate from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.FeatureGate, error)
+ FeatureGateListerExpansion
+}
+
+// featureGateLister implements the FeatureGateLister interface.
+type featureGateLister struct {
+ indexer cache.Indexer
+}
+
+// NewFeatureGateLister returns a new FeatureGateLister.
+func NewFeatureGateLister(indexer cache.Indexer) FeatureGateLister {
+ return &featureGateLister{indexer: indexer}
+}
+
+// List lists all FeatureGates in the indexer.
+func (s *featureGateLister) List(selector labels.Selector) (ret []*v1.FeatureGate, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.FeatureGate))
+ })
+ return ret, err
+}
+
+// Get retrieves the FeatureGate from the index for a given name.
+func (s *featureGateLister) Get(name string) (*v1.FeatureGate, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("featuregate"), name)
+ }
+ return obj.(*v1.FeatureGate), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go
new file mode 100644
index 0000000000..f563f919a4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/image.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ImageLister helps list Images.
+// All objects returned here must be treated as read-only.
+type ImageLister interface {
+ // List lists all Images in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Image, err error)
+ // Get retrieves the Image from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Image, error)
+ ImageListerExpansion
+}
+
+// imageLister implements the ImageLister interface.
+type imageLister struct {
+ indexer cache.Indexer
+}
+
+// NewImageLister returns a new ImageLister.
+func NewImageLister(indexer cache.Indexer) ImageLister {
+ return &imageLister{indexer: indexer}
+}
+
+// List lists all Images in the indexer.
+func (s *imageLister) List(selector labels.Selector) (ret []*v1.Image, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Image))
+ })
+ return ret, err
+}
+
+// Get retrieves the Image from the index for a given name.
+func (s *imageLister) Get(name string) (*v1.Image, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("image"), name)
+ }
+ return obj.(*v1.Image), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go
new file mode 100644
index 0000000000..c9dadb9235
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagecontentpolicy.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ImageContentPolicyLister helps list ImageContentPolicies.
+// All objects returned here must be treated as read-only.
+type ImageContentPolicyLister interface {
+ // List lists all ImageContentPolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ImageContentPolicy, err error)
+ // Get retrieves the ImageContentPolicy from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ImageContentPolicy, error)
+ ImageContentPolicyListerExpansion
+}
+
+// imageContentPolicyLister implements the ImageContentPolicyLister interface.
+type imageContentPolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewImageContentPolicyLister returns a new ImageContentPolicyLister.
+func NewImageContentPolicyLister(indexer cache.Indexer) ImageContentPolicyLister {
+ return &imageContentPolicyLister{indexer: indexer}
+}
+
+// List lists all ImageContentPolicies in the indexer.
+func (s *imageContentPolicyLister) List(selector labels.Selector) (ret []*v1.ImageContentPolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ImageContentPolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the ImageContentPolicy from the index for a given name.
+func (s *imageContentPolicyLister) Get(name string) (*v1.ImageContentPolicy, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("imagecontentpolicy"), name)
+ }
+ return obj.(*v1.ImageContentPolicy), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go
new file mode 100644
index 0000000000..03c0e72d2d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagedigestmirrorset.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ImageDigestMirrorSetLister helps list ImageDigestMirrorSets.
+// All objects returned here must be treated as read-only.
+type ImageDigestMirrorSetLister interface {
+ // List lists all ImageDigestMirrorSets in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ImageDigestMirrorSet, err error)
+ // Get retrieves the ImageDigestMirrorSet from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ImageDigestMirrorSet, error)
+ ImageDigestMirrorSetListerExpansion
+}
+
+// imageDigestMirrorSetLister implements the ImageDigestMirrorSetLister interface.
+type imageDigestMirrorSetLister struct {
+ indexer cache.Indexer
+}
+
+// NewImageDigestMirrorSetLister returns a new ImageDigestMirrorSetLister.
+func NewImageDigestMirrorSetLister(indexer cache.Indexer) ImageDigestMirrorSetLister {
+ return &imageDigestMirrorSetLister{indexer: indexer}
+}
+
+// List lists all ImageDigestMirrorSets in the indexer.
+func (s *imageDigestMirrorSetLister) List(selector labels.Selector) (ret []*v1.ImageDigestMirrorSet, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ImageDigestMirrorSet))
+ })
+ return ret, err
+}
+
+// Get retrieves the ImageDigestMirrorSet from the index for a given name.
+func (s *imageDigestMirrorSetLister) Get(name string) (*v1.ImageDigestMirrorSet, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("imagedigestmirrorset"), name)
+ }
+ return obj.(*v1.ImageDigestMirrorSet), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go
new file mode 100644
index 0000000000..57b4431c02
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/imagetagmirrorset.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ImageTagMirrorSetLister helps list ImageTagMirrorSets.
+// All objects returned here must be treated as read-only.
+type ImageTagMirrorSetLister interface {
+ // List lists all ImageTagMirrorSets in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ImageTagMirrorSet, err error)
+ // Get retrieves the ImageTagMirrorSet from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ImageTagMirrorSet, error)
+ ImageTagMirrorSetListerExpansion
+}
+
+// imageTagMirrorSetLister implements the ImageTagMirrorSetLister interface.
+type imageTagMirrorSetLister struct {
+ indexer cache.Indexer
+}
+
+// NewImageTagMirrorSetLister returns a new ImageTagMirrorSetLister.
+func NewImageTagMirrorSetLister(indexer cache.Indexer) ImageTagMirrorSetLister {
+ return &imageTagMirrorSetLister{indexer: indexer}
+}
+
+// List lists all ImageTagMirrorSets in the indexer.
+func (s *imageTagMirrorSetLister) List(selector labels.Selector) (ret []*v1.ImageTagMirrorSet, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ImageTagMirrorSet))
+ })
+ return ret, err
+}
+
+// Get retrieves the ImageTagMirrorSet from the index for a given name.
+func (s *imageTagMirrorSetLister) Get(name string) (*v1.ImageTagMirrorSet, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("imagetagmirrorset"), name)
+ }
+ return obj.(*v1.ImageTagMirrorSet), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go
new file mode 100644
index 0000000000..33f4b229e2
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/infrastructure.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// InfrastructureLister helps list Infrastructures.
+// All objects returned here must be treated as read-only.
+type InfrastructureLister interface {
+ // List lists all Infrastructures in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Infrastructure, err error)
+ // Get retrieves the Infrastructure from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Infrastructure, error)
+ InfrastructureListerExpansion
+}
+
+// infrastructureLister implements the InfrastructureLister interface.
+type infrastructureLister struct {
+ indexer cache.Indexer
+}
+
+// NewInfrastructureLister returns a new InfrastructureLister.
+func NewInfrastructureLister(indexer cache.Indexer) InfrastructureLister {
+ return &infrastructureLister{indexer: indexer}
+}
+
+// List lists all Infrastructures in the indexer.
+func (s *infrastructureLister) List(selector labels.Selector) (ret []*v1.Infrastructure, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Infrastructure))
+ })
+ return ret, err
+}
+
+// Get retrieves the Infrastructure from the index for a given name.
+func (s *infrastructureLister) Get(name string) (*v1.Infrastructure, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("infrastructure"), name)
+ }
+ return obj.(*v1.Infrastructure), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go
new file mode 100644
index 0000000000..78b0fd1254
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/ingress.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// IngressLister helps list Ingresses.
+// All objects returned here must be treated as read-only.
+type IngressLister interface {
+ // List lists all Ingresses in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Ingress, err error)
+ // Get retrieves the Ingress from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Ingress, error)
+ IngressListerExpansion
+}
+
+// ingressLister implements the IngressLister interface.
+type ingressLister struct {
+ indexer cache.Indexer
+}
+
+// NewIngressLister returns a new IngressLister.
+func NewIngressLister(indexer cache.Indexer) IngressLister {
+ return &ingressLister{indexer: indexer}
+}
+
+// List lists all Ingresses in the indexer.
+func (s *ingressLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Ingress))
+ })
+ return ret, err
+}
+
+// Get retrieves the Ingress from the index for a given name.
+func (s *ingressLister) Get(name string) (*v1.Ingress, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("ingress"), name)
+ }
+ return obj.(*v1.Ingress), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go
new file mode 100644
index 0000000000..174fdb45b7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/network.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// NetworkLister helps list Networks.
+// All objects returned here must be treated as read-only.
+type NetworkLister interface {
+ // List lists all Networks in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Network, err error)
+ // Get retrieves the Network from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Network, error)
+ NetworkListerExpansion
+}
+
+// networkLister implements the NetworkLister interface.
+type networkLister struct {
+ indexer cache.Indexer
+}
+
+// NewNetworkLister returns a new NetworkLister.
+func NewNetworkLister(indexer cache.Indexer) NetworkLister {
+ return &networkLister{indexer: indexer}
+}
+
+// List lists all Networks in the indexer.
+func (s *networkLister) List(selector labels.Selector) (ret []*v1.Network, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Network))
+ })
+ return ret, err
+}
+
+// Get retrieves the Network from the index for a given name.
+func (s *networkLister) Get(name string) (*v1.Network, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("network"), name)
+ }
+ return obj.(*v1.Network), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go
new file mode 100644
index 0000000000..b35c27117d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/node.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// NodeLister helps list Nodes.
+// All objects returned here must be treated as read-only.
+type NodeLister interface {
+ // List lists all Nodes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Node, err error)
+ // Get retrieves the Node from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Node, error)
+ NodeListerExpansion
+}
+
+// nodeLister implements the NodeLister interface.
+type nodeLister struct {
+ indexer cache.Indexer
+}
+
+// NewNodeLister returns a new NodeLister.
+func NewNodeLister(indexer cache.Indexer) NodeLister {
+ return &nodeLister{indexer: indexer}
+}
+
+// List lists all Nodes in the indexer.
+func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Node))
+ })
+ return ret, err
+}
+
+// Get retrieves the Node from the index for a given name.
+func (s *nodeLister) Get(name string) (*v1.Node, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("node"), name)
+ }
+ return obj.(*v1.Node), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go
new file mode 100644
index 0000000000..8d9882471b
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/oauth.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// OAuthLister helps list OAuths.
+// All objects returned here must be treated as read-only.
+type OAuthLister interface {
+ // List lists all OAuths in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.OAuth, err error)
+ // Get retrieves the OAuth from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.OAuth, error)
+ OAuthListerExpansion
+}
+
+// oAuthLister implements the OAuthLister interface.
+type oAuthLister struct {
+ indexer cache.Indexer
+}
+
+// NewOAuthLister returns a new OAuthLister.
+func NewOAuthLister(indexer cache.Indexer) OAuthLister {
+ return &oAuthLister{indexer: indexer}
+}
+
+// List lists all OAuths in the indexer.
+func (s *oAuthLister) List(selector labels.Selector) (ret []*v1.OAuth, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.OAuth))
+ })
+ return ret, err
+}
+
+// Get retrieves the OAuth from the index for a given name.
+func (s *oAuthLister) Get(name string) (*v1.OAuth, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("oauth"), name)
+ }
+ return obj.(*v1.OAuth), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go
new file mode 100644
index 0000000000..b69a49471c
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/operatorhub.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// OperatorHubLister helps list OperatorHubs.
+// All objects returned here must be treated as read-only.
+type OperatorHubLister interface {
+ // List lists all OperatorHubs in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.OperatorHub, err error)
+ // Get retrieves the OperatorHub from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.OperatorHub, error)
+ OperatorHubListerExpansion
+}
+
+// operatorHubLister implements the OperatorHubLister interface.
+type operatorHubLister struct {
+ indexer cache.Indexer
+}
+
+// NewOperatorHubLister returns a new OperatorHubLister.
+func NewOperatorHubLister(indexer cache.Indexer) OperatorHubLister {
+ return &operatorHubLister{indexer: indexer}
+}
+
+// List lists all OperatorHubs in the indexer.
+func (s *operatorHubLister) List(selector labels.Selector) (ret []*v1.OperatorHub, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.OperatorHub))
+ })
+ return ret, err
+}
+
+// Get retrieves the OperatorHub from the index for a given name.
+func (s *operatorHubLister) Get(name string) (*v1.OperatorHub, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("operatorhub"), name)
+ }
+ return obj.(*v1.OperatorHub), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go
new file mode 100644
index 0000000000..30273ba6be
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/project.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ProjectLister helps list Projects.
+// All objects returned here must be treated as read-only.
+type ProjectLister interface {
+ // List lists all Projects in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Project, err error)
+ // Get retrieves the Project from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Project, error)
+ ProjectListerExpansion
+}
+
+// projectLister implements the ProjectLister interface.
+type projectLister struct {
+ indexer cache.Indexer
+}
+
+// NewProjectLister returns a new ProjectLister.
+func NewProjectLister(indexer cache.Indexer) ProjectLister {
+ return &projectLister{indexer: indexer}
+}
+
+// List lists all Projects in the indexer.
+func (s *projectLister) List(selector labels.Selector) (ret []*v1.Project, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Project))
+ })
+ return ret, err
+}
+
+// Get retrieves the Project from the index for a given name.
+func (s *projectLister) Get(name string) (*v1.Project, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("project"), name)
+ }
+ return obj.(*v1.Project), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go
new file mode 100644
index 0000000000..8ecb633c3e
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/proxy.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ProxyLister helps list Proxies.
+// All objects returned here must be treated as read-only.
+type ProxyLister interface {
+ // List lists all Proxies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Proxy, err error)
+ // Get retrieves the Proxy from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Proxy, error)
+ ProxyListerExpansion
+}
+
+// proxyLister implements the ProxyLister interface.
+type proxyLister struct {
+ indexer cache.Indexer
+}
+
+// NewProxyLister returns a new ProxyLister.
+func NewProxyLister(indexer cache.Indexer) ProxyLister {
+ return &proxyLister{indexer: indexer}
+}
+
+// List lists all Proxies in the indexer.
+func (s *proxyLister) List(selector labels.Selector) (ret []*v1.Proxy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Proxy))
+ })
+ return ret, err
+}
+
+// Get retrieves the Proxy from the index for a given name.
+func (s *proxyLister) Get(name string) (*v1.Proxy, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("proxy"), name)
+ }
+ return obj.(*v1.Proxy), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go
new file mode 100644
index 0000000000..3e2f81ea44
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/scheduler.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// SchedulerLister helps list Schedulers.
+// All objects returned here must be treated as read-only.
+type SchedulerLister interface {
+ // List lists all Schedulers in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Scheduler, err error)
+ // Get retrieves the Scheduler from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Scheduler, error)
+ SchedulerListerExpansion
+}
+
+// schedulerLister implements the SchedulerLister interface.
+type schedulerLister struct {
+ indexer cache.Indexer
+}
+
+// NewSchedulerLister returns a new SchedulerLister.
+func NewSchedulerLister(indexer cache.Indexer) SchedulerLister {
+ return &schedulerLister{indexer: indexer}
+}
+
+// List lists all Schedulers in the indexer.
+func (s *schedulerLister) List(selector labels.Selector) (ret []*v1.Scheduler, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Scheduler))
+ })
+ return ret, err
+}
+
+// Get retrieves the Scheduler from the index for a given name.
+func (s *schedulerLister) Get(name string) (*v1.Scheduler, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("scheduler"), name)
+ }
+ return obj.(*v1.Scheduler), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go
new file mode 100644
index 0000000000..0ee7bee660
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/backup.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// BackupLister helps list Backups.
+// All objects returned here must be treated as read-only.
+type BackupLister interface {
+ // List lists all Backups in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.Backup, err error)
+ // Get retrieves the Backup from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.Backup, error)
+ BackupListerExpansion
+}
+
+// backupLister implements the BackupLister interface.
+type backupLister struct {
+ indexer cache.Indexer
+}
+
+// NewBackupLister returns a new BackupLister.
+func NewBackupLister(indexer cache.Indexer) BackupLister {
+ return &backupLister{indexer: indexer}
+}
+
+// List lists all Backups in the indexer.
+func (s *backupLister) List(selector labels.Selector) (ret []*v1alpha1.Backup, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.Backup))
+ })
+ return ret, err
+}
+
+// Get retrieves the Backup from the index for a given name.
+func (s *backupLister) Get(name string) (*v1alpha1.Backup, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("backup"), name)
+ }
+ return obj.(*v1alpha1.Backup), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go
new file mode 100644
index 0000000000..80fe45e1ef
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/clusterimagepolicy.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ClusterImagePolicyLister helps list ClusterImagePolicies.
+// All objects returned here must be treated as read-only.
+type ClusterImagePolicyLister interface {
+ // List lists all ClusterImagePolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.ClusterImagePolicy, err error)
+ // Get retrieves the ClusterImagePolicy from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.ClusterImagePolicy, error)
+ ClusterImagePolicyListerExpansion
+}
+
+// clusterImagePolicyLister implements the ClusterImagePolicyLister interface.
+type clusterImagePolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewClusterImagePolicyLister returns a new ClusterImagePolicyLister.
+func NewClusterImagePolicyLister(indexer cache.Indexer) ClusterImagePolicyLister {
+ return &clusterImagePolicyLister{indexer: indexer}
+}
+
+// List lists all ClusterImagePolicies in the indexer.
+func (s *clusterImagePolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterImagePolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ClusterImagePolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the ClusterImagePolicy from the index for a given name.
+func (s *clusterImagePolicyLister) Get(name string) (*v1alpha1.ClusterImagePolicy, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("clusterimagepolicy"), name)
+ }
+ return obj.(*v1alpha1.ClusterImagePolicy), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go
new file mode 100644
index 0000000000..97e64a7cc9
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go
@@ -0,0 +1,23 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// BackupListerExpansion allows custom methods to be added to
+// BackupLister.
+type BackupListerExpansion interface{}
+
+// ClusterImagePolicyListerExpansion allows custom methods to be added to
+// ClusterImagePolicyLister.
+type ClusterImagePolicyListerExpansion interface{}
+
+// ImagePolicyListerExpansion allows custom methods to be added to
+// ImagePolicyLister.
+type ImagePolicyListerExpansion interface{}
+
+// ImagePolicyNamespaceListerExpansion allows custom methods to be added to
+// ImagePolicyNamespaceLister.
+type ImagePolicyNamespaceListerExpansion interface{}
+
+// InsightsDataGatherListerExpansion allows custom methods to be added to
+// InsightsDataGatherLister.
+type InsightsDataGatherListerExpansion interface{}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go
new file mode 100644
index 0000000000..4071711abc
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/imagepolicy.go
@@ -0,0 +1,83 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ImagePolicyLister helps list ImagePolicies.
+// All objects returned here must be treated as read-only.
+type ImagePolicyLister interface {
+ // List lists all ImagePolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error)
+ // ImagePolicies returns an object that can list and get ImagePolicies.
+ ImagePolicies(namespace string) ImagePolicyNamespaceLister
+ ImagePolicyListerExpansion
+}
+
+// imagePolicyLister implements the ImagePolicyLister interface.
+type imagePolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewImagePolicyLister returns a new ImagePolicyLister.
+func NewImagePolicyLister(indexer cache.Indexer) ImagePolicyLister {
+ return &imagePolicyLister{indexer: indexer}
+}
+
+// List lists all ImagePolicies in the indexer.
+func (s *imagePolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ImagePolicy))
+ })
+ return ret, err
+}
+
+// ImagePolicies returns an object that can list and get ImagePolicies.
+func (s *imagePolicyLister) ImagePolicies(namespace string) ImagePolicyNamespaceLister {
+ return imagePolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ImagePolicyNamespaceLister helps list and get ImagePolicies.
+// All objects returned here must be treated as read-only.
+type ImagePolicyNamespaceLister interface {
+ // List lists all ImagePolicies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error)
+ // Get retrieves the ImagePolicy from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.ImagePolicy, error)
+ ImagePolicyNamespaceListerExpansion
+}
+
+// imagePolicyNamespaceLister implements the ImagePolicyNamespaceLister
+// interface.
+type imagePolicyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ImagePolicies in the indexer for a given namespace.
+func (s imagePolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ImagePolicy, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ImagePolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the ImagePolicy from the indexer for a given namespace and name.
+func (s imagePolicyNamespaceLister) Get(name string) (*v1alpha1.ImagePolicy, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("imagepolicy"), name)
+ }
+ return obj.(*v1alpha1.ImagePolicy), nil
+}
diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go
new file mode 100644
index 0000000000..887f066e40
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go
@@ -0,0 +1,52 @@
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/openshift/api/config/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// InsightsDataGatherLister helps list InsightsDataGathers.
+// All objects returned here must be treated as read-only.
+type InsightsDataGatherLister interface {
+ // List lists all InsightsDataGathers in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.InsightsDataGather, err error)
+ // Get retrieves the InsightsDataGather from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.InsightsDataGather, error)
+ InsightsDataGatherListerExpansion
+}
+
+// insightsDataGatherLister implements the InsightsDataGatherLister interface.
+type insightsDataGatherLister struct {
+ indexer cache.Indexer
+}
+
+// NewInsightsDataGatherLister returns a new InsightsDataGatherLister.
+func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister {
+ return &insightsDataGatherLister{indexer: indexer}
+}
+
+// List lists all InsightsDataGathers in the indexer.
+func (s *insightsDataGatherLister) List(selector labels.Selector) (ret []*v1alpha1.InsightsDataGather, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.InsightsDataGather))
+ })
+ return ret, err
+}
+
+// Get retrieves the InsightsDataGather from the index for a given name.
+func (s *insightsDataGatherLister) Get(name string) (*v1alpha1.InsightsDataGather, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("insightsdatagather"), name)
+ }
+ return obj.(*v1alpha1.InsightsDataGather), nil
+}
diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go
new file mode 100644
index 0000000000..a72f8aa732
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go
@@ -0,0 +1,280 @@
+package factory
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/robfig/cron"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+ "k8s.io/klog/v2"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+ "github.com/openshift/library-go/pkg/operator/management"
+ "github.com/openshift/library-go/pkg/operator/v1helpers"
+ operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially.
+// This can be also done by re-adding the key to queue, but this is cheaper and more convenient.
+var SyntheticRequeueError = errors.New("synthetic requeue request")
+
+var defaultCacheSyncTimeout = 10 * time.Minute
+
+// baseController represents generic Kubernetes controller boiler-plate
+type baseController struct {
+ name string
+ cachesToSync []cache.InformerSynced
+ sync func(ctx context.Context, controllerContext SyncContext) error
+ syncContext SyncContext
+ syncDegradedClient operatorv1helpers.OperatorClient
+ resyncEvery time.Duration
+ resyncSchedules []cron.Schedule
+ postStartHooks []PostStartHook
+ cacheSyncTimeout time.Duration
+}
+
+var _ Controller = &baseController{}
+
+func (c baseController) Name() string {
+ return c.name
+}
+
+type scheduledJob struct {
+ queue workqueue.RateLimitingInterface
+ name string
+}
+
+func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job {
+ return &scheduledJob{
+ queue: queue,
+ name: name,
+ }
+}
+
+func (s *scheduledJob) Run() {
+ klog.V(4).Infof("Triggering scheduled %q controller run", s.name)
+ s.queue.Add(DefaultQueueKey)
+}
+
+func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error {
+ klog.Infof("Waiting for caches to sync for %s", controllerName)
+
+ if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
+ return fmt.Errorf("unable to sync caches for %s", controllerName)
+ }
+
+ klog.Infof("Caches are synced for %s ", controllerName)
+
+ return nil
+}
+
+func (c *baseController) Run(ctx context.Context, workers int) {
+ // HandleCrash recovers panics
+ defer utilruntime.HandleCrash(c.degradedPanicHandler)
+
+ // give caches 10 minutes to sync
+ cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout)
+ defer cacheSyncCancel()
+ err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...)
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ // Exit gracefully because the controller was requested to stop.
+ return
+ default:
+ // If caches did not sync after 10 minutes, it has taken oddly long and
+ // we should provide feedback. Since the control loops will never start,
+ // it is safer to exit with a good message than to continue with a dead loop.
+ // TODO: Consider making this behavior configurable.
+ klog.Exit(err)
+ }
+ }
+
+ var workerWg sync.WaitGroup
+ defer func() {
+ defer klog.Infof("All %s workers have been terminated", c.name)
+ workerWg.Wait()
+ }()
+
+ // queueContext is used to track and initiate queue shutdown
+ queueContext, queueContextCancel := context.WithCancel(context.TODO())
+
+ for i := 1; i <= workers; i++ {
+ klog.Infof("Starting #%d worker of %s controller ...", i, c.name)
+ workerWg.Add(1)
+ go func() {
+ defer func() {
+ klog.Infof("Shutting down worker of %s controller ...", c.name)
+ workerWg.Done()
+ }()
+ c.runWorker(queueContext)
+ }()
+ }
+
+ // if scheduled run is requested, run the cron scheduler
+ if c.resyncSchedules != nil {
+ scheduler := cron.New()
+ for _, s := range c.resyncSchedules {
+ scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue()))
+ }
+ scheduler.Start()
+ defer scheduler.Stop()
+ }
+
+ // runPeriodicalResync is independent from queue
+ if c.resyncEvery > 0 {
+ workerWg.Add(1)
+ go func() {
+ defer workerWg.Done()
+ c.runPeriodicalResync(ctx, c.resyncEvery)
+ }()
+ }
+
+ // run post-start hooks (custom triggers, etc.)
+ if len(c.postStartHooks) > 0 {
+ var hookWg sync.WaitGroup
+ defer func() {
+ hookWg.Wait() // wait for the post-start hooks
+ klog.Infof("All %s post start hooks have been terminated", c.name)
+ }()
+ for i := range c.postStartHooks {
+ hookWg.Add(1)
+ go func(index int) {
+ defer hookWg.Done()
+ if err := c.postStartHooks[index](ctx, c.syncContext); err != nil {
+ klog.Warningf("%s controller post start hook error: %v", c.name, err)
+ }
+ }(i)
+ }
+ }
+
+ // Handle controller shutdown
+
+ <-ctx.Done() // wait for controller context to be cancelled
+ c.syncContext.Queue().ShutDown() // shutdown the controller queue first
+ queueContextCancel() // cancel the queue context, which tell workers to initiate shutdown
+
+ // Wait for all workers to finish their job.
+ // at this point the Run() can hang and the caller has to implement the logic that will kill
+ // this controller (SIGKILL).
+ klog.Infof("Shutting down %s ...", c.name)
+}
+
+func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error {
+ return c.sync(ctx, syncCtx)
+}
+
+func (c *baseController) runPeriodicalResync(ctx context.Context, interval time.Duration) {
+ if interval == 0 {
+ return
+ }
+ go wait.UntilWithContext(ctx, func(ctx context.Context) {
+ c.syncContext.Queue().Add(DefaultQueueKey)
+ }, interval)
+}
+
+// runWorker runs a single worker
+// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time
+// to complete its shutdown.
+func (c *baseController) runWorker(queueCtx context.Context) {
+ wait.UntilWithContext(
+ queueCtx,
+ func(queueCtx context.Context) {
+ defer utilruntime.HandleCrash(c.degradedPanicHandler)
+ for {
+ select {
+ case <-queueCtx.Done():
+ return
+ default:
+ c.processNextWorkItem(queueCtx)
+ }
+ }
+ },
+ 1*time.Second)
+}
+
+// reconcile wraps the sync() call and, if the operator client is set, it handles the degraded condition if sync() returns an error.
+func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error {
+ err := c.sync(ctx, syncCtx)
+ degradedErr := c.reportDegraded(ctx, err)
+ if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() {
+ // The operator tolerates missing CR, therefore don't report it up.
+ return err
+ }
+ return degradedErr
+}
+
+// degradedPanicHandler will go degraded on failures; we should catch potential panics and convert them into bad status.
+func (c *baseController) degradedPanicHandler(panicVal interface{}) {
+ if c.syncDegradedClient == nil {
+ // if we don't have a client for reporting degraded condition, then let the existing panic handler do the work
+ return
+ }
+ _ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal))
+}
+
+// reportDegraded updates status with an indication of degraded-ness
+func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error {
+ if c.syncDegradedClient == nil {
+ return reportedError
+ }
+ if reportedError != nil {
+ _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+ Type: c.name + "Degraded",
+ Status: operatorv1.ConditionTrue,
+ Reason: "SyncError",
+ Message: reportedError.Error(),
+ }))
+ if updateErr != nil {
+ klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr)
+ }
+ return reportedError
+ }
+ _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient,
+ v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+ Type: c.name + "Degraded",
+ Status: operatorv1.ConditionFalse,
+ Reason: "AsExpected",
+ }))
+ return updateErr
+}
+
+func (c *baseController) processNextWorkItem(queueCtx context.Context) {
+ key, quit := c.syncContext.Queue().Get()
+ if quit {
+ return
+ }
+ defer c.syncContext.Queue().Done(key)
+
+ syncCtx := c.syncContext.(syncContext)
+ var ok bool
+ syncCtx.queueKey, ok = key.(string)
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
+ return
+ }
+
+ if err := c.reconcile(queueCtx, syncCtx); err != nil {
+ if err == SyntheticRequeueError {
+ // logging this helps detecting wedged controllers with missing pre-requirements
+ klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key)
+ } else {
+ if klog.V(4).Enabled() || key != "key" {
+ utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
+ } else {
+ utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
+ }
+ }
+ c.syncContext.Queue().AddRateLimited(key)
+ return
+ }
+
+ c.syncContext.Queue().Forget(key)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go
new file mode 100644
index 0000000000..3c585e40af
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go
@@ -0,0 +1,116 @@
+package factory
+
+import (
+ "fmt"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+)
+
+// syncContext implements SyncContext and provide user access to queue and object that caused
+// the sync to be triggered.
+type syncContext struct {
+ eventRecorder events.Recorder
+ queue workqueue.RateLimitingInterface
+ queueKey string
+}
+
+var _ SyncContext = syncContext{}
+
+// NewSyncContext gives new sync context.
+func NewSyncContext(name string, recorder events.Recorder) SyncContext {
+ return syncContext{
+ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
+ eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)),
+ }
+}
+
+func (c syncContext) Queue() workqueue.RateLimitingInterface {
+ return c.queue
+}
+
+func (c syncContext) QueueKey() string {
+ return c.queueKey
+}
+
+func (c syncContext) Recorder() events.Recorder {
+ return c.eventRecorder
+}
+
+// eventHandler provides default event handler that is added to an informers passed to controller factory.
+func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler {
+ resourceEventHandler := cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ runtimeObj, ok := obj.(runtime.Object)
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
+ return
+ }
+ c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+ },
+ UpdateFunc: func(old, new interface{}) {
+ runtimeObj, ok := new.(runtime.Object)
+ if !ok {
+ utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", runtimeObj))
+ return
+ }
+ c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+ },
+ DeleteFunc: func(obj interface{}) {
+ runtimeObj, ok := obj.(runtime.Object)
+ if !ok {
+ if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+ c.enqueueKeys(queueKeysFunc(tombstone.Obj.(runtime.Object))...)
+
+ return
+ }
+ utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", runtimeObj))
+ return
+ }
+ c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+ },
+ }
+ if filter == nil {
+ return resourceEventHandler
+ }
+ return cache.FilteringResourceEventHandler{
+ FilterFunc: filter,
+ Handler: resourceEventHandler,
+ }
+}
+
+func (c syncContext) enqueueKeys(keys ...string) {
+ for _, qKey := range keys {
+ c.queue.Add(qKey)
+ }
+}
+
+// namespaceChecker returns a function which returns true if an input obj
+// (or its tombstone) is a namespace and it matches a name of any namespaces
+// that we are interested in
+func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool {
+ interestingNamespacesSet := sets.NewString(interestingNamespaces...)
+
+ return func(obj interface{}) bool {
+ ns, ok := obj.(*corev1.Namespace)
+ if ok {
+ return interestingNamespacesSet.Has(ns.Name)
+ }
+
+ // the object might be getting deleted
+ tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+ if ok {
+ if ns, ok := tombstone.Obj.(*corev1.Namespace); ok {
+ return interestingNamespacesSet.Has(ns.Name)
+ }
+ }
+ return false
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go
new file mode 100644
index 0000000000..b70da95481
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go
@@ -0,0 +1,26 @@
+package factory
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+func ObjectNameToKey(obj runtime.Object) string {
+ metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+ if !ok {
+ return ""
+ }
+ return metaObj.GetObjectMeta().GetName()
+}
+
+func NamesFilter(names ...string) EventFilterFunc {
+ nameSet := sets.NewString(names...)
+ return func(obj interface{}) bool {
+ metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+ if !ok {
+ return false
+ }
+ return nameSet.Has(metaObj.GetObjectMeta().GetName())
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
new file mode 100644
index 0000000000..728f78f71e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
@@ -0,0 +1,315 @@
+package factory
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/robfig/cron"
+ "k8s.io/apimachinery/pkg/runtime"
+ errorutil "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// DefaultQueueKey is the queue key used for string trigger based controllers.
+const DefaultQueueKey = "key"
+
+// DefaultQueueKeysFunc returns a slice with a single element - the DefaultQueueKey
+func DefaultQueueKeysFunc(_ runtime.Object) []string {
+ return []string{DefaultQueueKey}
+}
+
+// Factory is generator that generate standard Kubernetes controllers.
+// Factory is really generic and should only be used for simple controllers that do not require special handling.
+type Factory struct {
+ sync SyncFunc
+ syncContext SyncContext
+ syncDegradedClient operatorv1helpers.OperatorClient
+ resyncInterval time.Duration
+ resyncSchedules []string
+ informers []filteredInformers
+ informerQueueKeys []informersWithQueueKey
+ bareInformers []Informer
+ postStartHooks []PostStartHook
+ namespaceInformers []*namespaceInformer
+ cachesToSync []cache.InformerSynced
+ interestingNamespaces sets.String
+}
+
+// Informer represents any structure that allow to register event handlers and informs if caches are synced.
+// Any SharedInformer will comply.
+type Informer interface {
+ AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
+ HasSynced() bool
+}
+
+type namespaceInformer struct {
+ informer Informer
+ nsFilter EventFilterFunc
+}
+
+type informersWithQueueKey struct {
+ informers []Informer
+ filter EventFilterFunc
+ queueKeyFn ObjectQueueKeysFunc
+}
+
+type filteredInformers struct {
+ informers []Informer
+ filter EventFilterFunc
+}
+
+// PostStartHook specify a function that will run after controller is started.
+// The context is cancelled when the controller is asked to shutdown and the post start hook should terminate as well.
+// The syncContext allow access to controller queue and event recorder.
+type PostStartHook func(ctx context.Context, syncContext SyncContext) error
+
+// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller that only uses string
+// triggers.
+// DEPRECATED: use ObjectQueueKeysFunc instead
+type ObjectQueueKeyFunc func(runtime.Object) string
+
+// ObjectQueueKeysFunc is used to make a string work queue keys out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need to, or just return "key" if you are building a controller that only uses string
+// triggers.
+type ObjectQueueKeysFunc func(runtime.Object) []string
+
+// EventFilterFunc is used to filter informer events to prevent Sync() from being called
+type EventFilterFunc func(obj interface{}) bool
+
+// New return new factory instance.
+func New() *Factory {
+ return &Factory{}
+}
+
+// Sync is used to set the controller synchronization function. This function is the core of the controller and is
+// usually hold the main controller logic.
+func (f *Factory) WithSync(syncFn SyncFunc) *Factory {
+ f.sync = syncFn
+ return f
+}
+
+// WithInformers is used to register event handlers and get the caches synchronized functions.
+// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+func (f *Factory) WithInformers(informers ...Informer) *Factory {
+ f.WithFilteredEventsInformers(nil, informers...)
+ return f
+}
+
+// WithFilteredEventsInformers is used to register event handlers and get the caches synchronized functions.
+// Pass the informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+// Pass filter to filter out events that should not trigger Sync() call.
+func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory {
+ f.informers = append(f.informers, filteredInformers{
+ informers: informers,
+ filter: filter,
+ })
+ return f
+}
+
+// WithBareInformers allow to register informer that already has custom event handlers registered and no additional
+// event handlers will be added to this informer.
+// The controller will wait for the cache of this informer to be synced.
+// The existing event handlers will have to respect the queue key function or the sync() implementation will have to
+// count with custom queue keys.
+func (f *Factory) WithBareInformers(informers ...Informer) *Factory {
+ f.bareInformers = append(f.bareInformers, informers...)
+ return f
+}
+
+// WithInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions.
+// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue.
+func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory {
+ f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
+ informers: informers,
+ queueKeyFn: func(o runtime.Object) []string {
+ return []string{queueKeyFn(o)}
+ },
+ })
+ return f
+}
+
+// WithFilteredEventsInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions.
+// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue.
+// Pass filter to filter out events that should not trigger Sync() call.
+func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory {
+ f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
+ informers: informers,
+ filter: filter,
+ queueKeyFn: func(o runtime.Object) []string {
+ return []string{queueKeyFn(o)}
+ },
+ })
+ return f
+}
+
+// WithInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions.
+// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue.
+func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory {
+ f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
+ informers: informers,
+ queueKeyFn: queueKeyFn,
+ })
+ return f
+}
+
+// WithFilteredEventsInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions.
+// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function
+// is called.
+// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue.
+// Pass filter to filter out events that should not trigger Sync() call.
+func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory {
+ f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{
+ informers: informers,
+ filter: filter,
+ queueKeyFn: queueKeyFn,
+ })
+ return f
+}
+
+// WithPostStartHooks allows to register functions that will run asynchronously after the controller is started via Run command.
+func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory {
+ f.postStartHooks = append(f.postStartHooks, hooks...)
+ return f
+}
+
+// WithNamespaceInformer is used to register event handlers and get the caches synchronized functions.
+// The sync function will only trigger when the object observed by this informer is a namespace and its name matches the interestingNamespaces.
+// Do not use this to register non-namespace informers.
+func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory {
+ f.namespaceInformers = append(f.namespaceInformers, &namespaceInformer{
+ informer: informer,
+ nsFilter: namespaceChecker(interestingNamespaces),
+ })
+ return f
+}
+
+// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers.
+// This is useful when you want to refresh every N minutes or you fear that your informers can get stuck.
+// If this is not called, no periodical resync will happen.
+// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself.
+//
+// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects.
+func (f *Factory) ResyncEvery(interval time.Duration) *Factory {
+ f.resyncInterval = interval
+ return f
+}
+
+// ResyncSchedule allows to supply a Cron syntax schedule that will be used to schedule the sync() call runs.
+// This allows more fine-tuned controller scheduling than ResyncEvery.
+// Examples:
+//
+// factory.New().ResyncSchedule("@every 1s").ToController() // Every second
+// factory.New().ResyncSchedule("@hourly").ToController() // Every hour
+// factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour
+//
+// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself.
+//
+// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects.
+func (f *Factory) ResyncSchedule(schedules ...string) *Factory {
+ f.resyncSchedules = append(f.resyncSchedules, schedules...)
+ return f
+}
+
+// WithSyncContext allows to specify custom, existing sync context for this factory.
+// This is useful during unit testing where you can override the default event recorder or mock the runtime objects.
+// If this function not called, a SyncContext is created by the factory automatically.
+func (f *Factory) WithSyncContext(ctx SyncContext) *Factory {
+ f.syncContext = ctx
+ return f
+}
+
+// WithSyncDegradedOnError encapsulate the controller sync() function, so when this function return an error, the operator client
+// is used to set the degraded condition to (eg. "ControllerFooDegraded"). The degraded condition name is set based on the controller name.
+func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory {
+ f.syncDegradedClient = operatorClient
+ return f
+}
+
+// Controller produce a runnable controller.
+func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller {
+ if f.sync == nil {
+ panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name))
+ }
+
+ var ctx SyncContext
+ if f.syncContext != nil {
+ ctx = f.syncContext
+ } else {
+ ctx = NewSyncContext(name, eventRecorder)
+ }
+
+ var cronSchedules []cron.Schedule
+ if len(f.resyncSchedules) > 0 {
+ var errors []error
+ for _, schedule := range f.resyncSchedules {
+ if s, err := cron.ParseStandard(schedule); err != nil {
+ errors = append(errors, err)
+ } else {
+ cronSchedules = append(cronSchedules, s)
+ }
+ }
+ if err := errorutil.NewAggregate(errors); err != nil {
+ panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err))
+ }
+ }
+
+ c := &baseController{
+ name: name,
+ syncDegradedClient: f.syncDegradedClient,
+ sync: f.sync,
+ resyncEvery: f.resyncInterval,
+ resyncSchedules: cronSchedules,
+ cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...),
+ syncContext: ctx,
+ postStartHooks: f.postStartHooks,
+ cacheSyncTimeout: defaultCacheSyncTimeout,
+ }
+
+ // Warn about too fast resyncs as they might drain the operators QPS.
+ // This event is cheap as it is only emitted on operator startup.
+ if c.resyncEvery.Seconds() < 60 {
+ ctx.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", name, c.resyncEvery)
+ }
+
+ for i := range f.informerQueueKeys {
+ for d := range f.informerQueueKeys[i].informers {
+ informer := f.informerQueueKeys[i].informers[d]
+ queueKeyFn := f.informerQueueKeys[i].queueKeyFn
+ informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter))
+ c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
+ }
+ }
+
+ for i := range f.informers {
+ for d := range f.informers[i].informers {
+ informer := f.informers[i].informers[d]
+ informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter))
+ c.cachesToSync = append(c.cachesToSync, informer.HasSynced)
+ }
+ }
+
+ for i := range f.bareInformers {
+ c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced)
+ }
+
+ for i := range f.namespaceInformers {
+ f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.namespaceInformers[i].nsFilter))
+ c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced)
+ }
+
+ return c
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go
new file mode 100644
index 0000000000..0ef98c6701
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go
@@ -0,0 +1,47 @@
+package factory
+
+import (
+ "context"
+
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+)
+
+// Controller interface represents a runnable Kubernetes controller.
+// Cancelling the syncContext passed will cause the controller to shutdown.
+// Number of workers determine how much parallel the job processing should be.
+type Controller interface {
+ // Run runs the controller and blocks until the controller is finished.
+ // Number of workers can be specified via workers parameter.
+ // This function will return when all internal loops are finished.
+ // Note that having more than one worker usually means handing parallelization of Sync().
+ Run(ctx context.Context, workers int)
+
+ // Sync contain the main controller logic.
+ // This should not be called directly, but can be used in unit tests to exercise the sync.
+ Sync(ctx context.Context, controllerContext SyncContext) error
+
+ // Name returns the controller name string.
+ Name() string
+}
+
+// SyncContext interface represents a context given to the Sync() function where the main controller logic happen.
+// SyncContext exposes controller name and give user access to the queue (for manual requeue).
+// SyncContext also provides metadata about object that informers observed as changed.
+type SyncContext interface {
+ // Queue gives access to controller queue. This can be used for manual requeue, although if a Sync() function return
+ // an error, the object is automatically re-queued. Use with caution.
+ Queue() workqueue.RateLimitingInterface
+
+ // QueueKey represents the queue key passed to the Sync function.
+ QueueKey() string
+
+ // Recorder provide access to event recorder.
+ Recorder() events.Recorder
+}
+
+// SyncFunc is a function that contain main controller logic.
+// The syncContext.syncContext passed is the main controller syncContext, when cancelled it means the controller is being shut down.
+// The syncContext provides access to controller name, queue and event recorder.
+type SyncFunc func(ctx context.Context, controllerContext SyncContext) error
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS
new file mode 100644
index 0000000000..4d4ce5ab9e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+ - stlaz
+approvers:
+ - stlaz
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go
new file mode 100644
index 0000000000..554112c49b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go
@@ -0,0 +1,1252 @@
+package crypto
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ mathrand "math/rand"
+ "net"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/client-go/util/cert"
+)
+
+// TLS versions that are known to golang. Go 1.13 adds support for
+// TLS 1.3 that's opt-out with a build flag.
+var versions = map[string]uint16{
+ "VersionTLS10": tls.VersionTLS10,
+ "VersionTLS11": tls.VersionTLS11,
+ "VersionTLS12": tls.VersionTLS12,
+ "VersionTLS13": tls.VersionTLS13,
+}
+
+// TLS versions that are enabled. Currently identical to the known set above;
+// kept separate so a build could disable individual versions.
+var supportedVersions = map[string]uint16{
+ "VersionTLS10": tls.VersionTLS10,
+ "VersionTLS11": tls.VersionTLS11,
+ "VersionTLS12": tls.VersionTLS12,
+ "VersionTLS13": tls.VersionTLS13,
+}
+
+// TLSVersionToNameOrDie given a tls version as an int, return its readable name.
+// Panics if the value is unknown or ambiguous.
+func TLSVersionToNameOrDie(intVal uint16) string {
+ matches := []string{}
+ for key, version := range versions {
+ if version == intVal {
+ matches = append(matches, key)
+ }
+ }
+
+ if len(matches) == 0 {
+ panic(fmt.Sprintf("no name found for %d", intVal))
+ }
+ if len(matches) > 1 {
+ panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches))
+ }
+ return matches[0]
+}
+
+// TLSVersion resolves a version name (e.g. "VersionTLS12") to its tls.* constant.
+// An empty name selects DefaultTLSVersion().
+func TLSVersion(versionName string) (uint16, error) {
+ if len(versionName) == 0 {
+ return DefaultTLSVersion(), nil
+ }
+ if version, ok := versions[versionName]; ok {
+ return version, nil
+ }
+ return 0, fmt.Errorf("unknown tls version %q", versionName)
+}
+
+// TLSVersionOrDie is like TLSVersion but panics on an unknown name.
+func TLSVersionOrDie(versionName string) uint16 {
+ version, err := TLSVersion(versionName)
+ if err != nil {
+ panic(err)
+ }
+ return version
+}
+
+// GolangTLSVersions returns, sorted, the TLS versions that are known to golang,
+// but may not necessarily be enabled.
+func GolangTLSVersions() []string {
+ supported := []string{}
+ for k := range versions {
+ supported = append(supported, k)
+ }
+ sort.Strings(supported)
+ return supported
+}
+
+// ValidTLSVersions returns, sorted, the build-enabled TLS versions.
+func ValidTLSVersions() []string {
+ validVersions := []string{}
+ for k := range supportedVersions {
+ validVersions = append(validVersions, k)
+ }
+ sort.Strings(validVersions)
+ return validVersions
+}
+
+// DefaultTLSVersion returns the minimum TLS version used when none is configured.
+func DefaultTLSVersion() uint16 {
+ // Can't use SSLv3 because of POODLE and BEAST
+ // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+ // Can't use TLSv1.1 because of RC4 cipher usage
+ return tls.VersionTLS12
+}
+
+// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not
+// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the
+// below three - and none other - are used)
+var ciphersTLS13 = map[string]uint16{
+ "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256,
+ "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384,
+ "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256,
+}
+
+// ciphers maps configurable (TLS <= 1.2) cipher suite names to their tls.* constants.
+// Note: the ChaCha20-Poly1305 suites appear twice, with and without the _SHA256
+// suffix, mapping to the same constant for backwards compatibility.
+var ciphers = map[string]uint16{
+ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+}
+
+// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names
+// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
+var openSSLToIANACiphersMap = map[string]string{
+ // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows
+ // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01
+ // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02
+ // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03
+
+ // TLS 1.2
+ "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B
+ "ECDHE-RSA-AES128-GCM-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2F
+ "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C
+ "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30
+ "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9
+ "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8
+ "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23
+ "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27
+ "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C
+ "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D
+ "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C
+
+ // TLS 1
+ "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09
+ "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13
+ "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A
+ "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14
+
+ // SSL 3
+ "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F
+ "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35
+ "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A
+}
+
+// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names.
+// Panics on any unknown suite.
+func CipherSuitesToNamesOrDie(intVals []uint16) []string {
+ ret := []string{}
+ for _, intVal := range intVals {
+ ret = append(ret, CipherSuiteToNameOrDie(intVal))
+ }
+
+ return ret
+}
+
+// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name
+func CipherSuiteToNameOrDie(intVal uint16) string {
+ // The following suite ids appear twice in the cipher map (with
+ // and without the _SHA256 suffix) for the purposes of backwards
+ // compatibility. Always return the current rather than the legacy
+ // name.
+ switch intVal {
+ case tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:
+ return "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256"
+ case tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:
+ return "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256"
+ }
+
+ matches := []string{}
+ for key, version := range ciphers {
+ if version == intVal {
+ matches = append(matches, key)
+ }
+ }
+
+ if len(matches) == 0 {
+ panic(fmt.Sprintf("no name found for %d", intVal))
+ }
+ if len(matches) > 1 {
+ panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches))
+ }
+ return matches[0]
+}
+
+// CipherSuite resolves a configurable cipher suite name to its tls.* constant.
+// TLSv1.3 suite names are rejected with an explanatory error since go does not
+// allow configuring them.
+func CipherSuite(cipherName string) (uint16, error) {
+ if cipher, ok := ciphers[cipherName]; ok {
+ return cipher, nil
+ }
+
+ if _, ok := ciphersTLS13[cipherName]; ok {
+ return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows")
+ }
+
+ return 0, fmt.Errorf("unknown cipher name %q", cipherName)
+}
+
+// CipherSuitesOrDie resolves the given names to tls.* constants, panicking on any
+// unknown name. An empty list selects DefaultCiphers().
+func CipherSuitesOrDie(cipherNames []string) []uint16 {
+ if len(cipherNames) == 0 {
+ return DefaultCiphers()
+ }
+ cipherValues := []uint16{}
+ for _, cipherName := range cipherNames {
+ cipher, err := CipherSuite(cipherName)
+ if err != nil {
+ panic(err)
+ }
+ cipherValues = append(cipherValues, cipher)
+ }
+ return cipherValues
+}
+
+// ValidCipherSuites returns, sorted, the configurable cipher suite names.
+func ValidCipherSuites() []string {
+ validCipherSuites := []string{}
+ for k := range ciphers {
+ validCipherSuites = append(validCipherSuites, k)
+ }
+ sort.Strings(validCipherSuites)
+ return validCipherSuites
+}
+// DefaultCiphers returns the default cipher suite preference order used when no
+// explicit suites are configured.
+func DefaultCiphers() []uint16 {
+ // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher
+ // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for
+ // perfect forward secrecy. Servers may provide additional cipher
+ // suites for backwards compatibility with HTTP/1.1 clients.
+ // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A
+ // (TLS 1.2 Cipher Suite Black List).
+ return []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2
+ // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index
+ // because it comes after ciphers forbidden by the http/2 spec
+ // tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack
+ // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2
+ }
+}
+
+// SecureTLSConfig enforces the default minimum security settings for the cluster.
+// It mutates and returns the given config, filling in MinVersion and CipherSuites
+// only when they are unset.
+func SecureTLSConfig(config *tls.Config) *tls.Config {
+ if config.MinVersion == 0 {
+ config.MinVersion = DefaultTLSVersion()
+ }
+
+ config.PreferServerCipherSuites = true
+ if len(config.CipherSuites) == 0 {
+ config.CipherSuites = DefaultCiphers()
+ }
+ return config
+}
+
+// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their
+// IANA counterparts.
+// Unknown ciphers are left out.
+func OpenSSLToIANACipherSuites(ciphers []string) []string {
+ ianaCiphers := make([]string, 0, len(ciphers))
+
+ for _, c := range ciphers {
+ ianaCipher, found := openSSLToIANACiphersMap[c]
+ if found {
+ ianaCiphers = append(ianaCiphers, ianaCipher)
+ }
+ }
+
+ return ianaCiphers
+}
+
+// TLSCertificateConfig holds a certificate chain (leaf first) and its private key.
+type TLSCertificateConfig struct {
+ Certs []*x509.Certificate
+ Key crypto.PrivateKey
+}
+
+// TLSCARoots holds a set of CA root certificates.
+type TLSCARoots struct {
+ Roots []*x509.Certificate
+}
+
+// WriteCertConfigFile writes the PEM-encoded certificate chain (mode 0644) and
+// private key (mode 0600) to the given paths, creating parent directories as needed.
+func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error {
+ // ensure parent dir
+ if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil {
+ return err
+ }
+ certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil {
+ return err
+ }
+ keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err != nil {
+ return err
+ }
+
+ if err := writeCertificates(certFileWriter, c.Certs...); err != nil {
+ return err
+ }
+ if err := writeKeyFile(keyFileWriter, c.Key); err != nil {
+ return err
+ }
+
+ // NOTE(review): the writers leak on the early-return error paths above;
+ // only the happy path closes both files. Upstream vendored behavior.
+ if err := certFileWriter.Close(); err != nil {
+ return err
+ }
+ if err := keyFileWriter.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// WriteCertConfig writes the PEM-encoded certificate chain and key to the given writers.
+func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error {
+ if err := writeCertificates(certFile, c.Certs...); err != nil {
+ return err
+ }
+ if err := writeKeyFile(keyFile, c.Key); err != nil {
+ return err
+ }
+ return nil
+}
+
+// GetPEMBytes returns the PEM-encoded certificate chain and key as byte slices.
+func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) {
+ certBytes, err := EncodeCertificates(c.Certs...)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyBytes, err := encodeKey(c.Key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return certBytes, keyBytes, nil
+}
+
+// GetTLSCertificateConfig loads and parses a certificate/key pair from the given
+// PEM files, validating that the key matches the certificate.
+func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) {
+ if len(certFile) == 0 {
+ return nil, errors.New("certFile missing")
+ }
+ if len(keyFile) == 0 {
+ return nil, errors.New("keyFile missing")
+ }
+
+ certPEMBlock, err := os.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+ certs, err := cert.ParseCertsPEM(certPEMBlock)
+ if err != nil {
+ return nil, fmt.Errorf("Error reading %s: %s", certFile, err)
+ }
+
+ keyPEMBlock, err := os.ReadFile(keyFile)
+ if err != nil {
+ return nil, err
+ }
+ // X509KeyPair verifies the key matches the leaf certificate.
+ keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ key := keyPairCert.PrivateKey
+
+ return &TLSCertificateConfig{certs, key}, nil
+}
+
+// GetTLSCertificateConfigFromBytes is like GetTLSCertificateConfig but parses
+// in-memory PEM bytes instead of files.
+func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) {
+ if len(certBytes) == 0 {
+ return nil, errors.New("certFile missing")
+ }
+ if len(keyBytes) == 0 {
+ return nil, errors.New("keyFile missing")
+ }
+
+ certs, err := cert.ParseCertsPEM(certBytes)
+ if err != nil {
+ return nil, fmt.Errorf("Error reading cert: %s", err)
+ }
+
+ keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes)
+ if err != nil {
+ return nil, err
+ }
+ key := keyPairCert.PrivateKey
+
+ return &TLSCertificateConfig{certs, key}, nil
+}
+
+const (
+ DefaultCertificateLifetimeInDays = 365 * 2 // 2 years
+ DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years
+
+ // Default keys are 2048 bits
+ keyBits = 2048
+)
+
+// CA bundles a signing certificate/key with a serial number generator for
+// issuing child certificates.
+type CA struct {
+ Config *TLSCertificateConfig
+
+ SerialGenerator SerialGenerator
+}
+
+// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe.
+type SerialGenerator interface {
+ Next(template *x509.Certificate) (int64, error)
+}
+
+// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value.
+type SerialFileGenerator struct {
+ SerialFile string
+
+ // lock guards access to the Serial field
+ lock sync.Mutex
+ Serial int64
+}
+
+// NewSerialFileGenerator reads the current serial from serialFile (which must
+// already exist) and returns a generator that continues from it.
+func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) {
+ // read serial file, it must already exist
+ serial, err := fileToSerial(serialFile)
+ if err != nil {
+ return nil, err
+ }
+
+ generator := &SerialFileGenerator{
+ Serial: serial,
+ SerialFile: serialFile,
+ }
+
+ // 0 is unused and 1 is reserved for the CA itself
+ // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+
+ // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative)
+ if generator.Serial < 1 {
+ // fake a call to Next so the file stays in sync and Serial is incremented
+ if _, err := generator.Next(&x509.Certificate{}); err != nil {
+ return nil, err
+ }
+ }
+
+ return generator, nil
+}
+
+// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value.
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file
+ serial, err := fileToSerial(s.SerialFile)
+ if err != nil {
+ return 0, err
+ }
+ if serial != s.Serial {
+ return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial)
+ }
+
+ next := s.Serial + 1
+ s.Serial = next
+
+ // Output in hex, padded to multiples of two characters for OpenSSL's sake
+ serialText := fmt.Sprintf("%X", next)
+ if len(serialText)%2 == 1 {
+ serialText = "0" + serialText
+ }
+ // always add a newline at the end to have a valid file
+ serialText += "\n"
+
+ if err := os.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil {
+ return 0, err
+ }
+ return next, nil
+}
+
+// fileToSerial parses the hex serial number stored in serialFile, rejecting
+// negative values.
+func fileToSerial(serialFile string) (int64, error) {
+ serialData, err := os.ReadFile(serialFile)
+ if err != nil {
+ return 0, err
+ }
+
+ // read the file as a single hex number after stripping any whitespace
+ serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ if serial < 0 {
+ return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile)
+ }
+
+ return serial, nil
+}
+
+// RandomSerialGenerator returns a serial based on time.Now and the subject
+type RandomSerialGenerator struct {
+}
+
+// Next implements SerialGenerator; the template is ignored.
+func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) {
+ return randomSerialNumber(), nil
+}
+
+// randomSerialNumber returns a random int64 serial number based on
+// time.Now. It is defined separately from the generator interface so
+// that the caller doesn't have to worry about an input template or
+// error - these are unnecessary when creating a random serial.
+func randomSerialNumber() int64 {
+ r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano()))
+ return r.Int63()
+}
+
+// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error
+// if serialFile is empty, a RandomSerialGenerator will be used
+func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) {
+ if ca, err := GetCA(certFile, keyFile, serialFile); err == nil {
+ return ca, false, err
+ }
+ // any load error falls through to generating a fresh self-signed CA
+ ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays)
+ return ca, true, err
+}
+
+// GetCA loads an existing CA from disk.
+// if serialFile is empty, a RandomSerialGenerator will be used
+func GetCA(certFile, keyFile, serialFile string) (*CA, error) {
+ caConfig, err := GetTLSCertificateConfig(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ var serialGenerator SerialGenerator
+ if len(serialFile) > 0 {
+ serialGenerator, err = NewSerialFileGenerator(serialFile)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ serialGenerator = &RandomSerialGenerator{}
+ }
+
+ return &CA{
+ SerialGenerator: serialGenerator,
+ Config: caConfig,
+ }, nil
+}
+
+// GetCAFromBytes builds a CA from in-memory PEM bytes; serials are always random.
+func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) {
+ caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CA{
+ SerialGenerator: &RandomSerialGenerator{},
+ Config: caConfig,
+ }, nil
+}
+
+// MakeSelfSignedCA generates a new self-signed CA and writes its cert/key (and
+// serial file, if requested) to disk.
+// if serialFile is empty, a RandomSerialGenerator will be used
+func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) {
+ klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile)
+
+ caConfig, err := MakeSelfSignedCAConfig(name, expireDays)
+ if err != nil {
+ return nil, err
+ }
+ if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil {
+ return nil, err
+ }
+
+ var serialGenerator SerialGenerator
+ if len(serialFile) > 0 {
+ // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file)
+ if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil {
+ return nil, err
+ }
+ serialGenerator, err = NewSerialFileGenerator(serialFile)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ serialGenerator = &RandomSerialGenerator{}
+ }
+
+ return &CA{
+ SerialGenerator: serialGenerator,
+ Config: caConfig,
+ }, nil
+}
+
+// MakeSelfSignedCAConfig generates a self-signed CA cert/key with the given
+// common name, valid for expireDays (or the default CA lifetime if <= 0).
+func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) {
+ subject := pkix.Name{CommonName: name}
+ return MakeSelfSignedCAConfigForSubject(subject, expireDays)
+}
+
+// MakeSelfSignedCAConfigForSubject is like MakeSelfSignedCAConfig but with a full
+// subject. Lifetimes beyond the default trigger a warning.
+func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) {
+ var caLifetimeInDays = DefaultCACertificateLifetimeInDays
+ if expireDays > 0 {
+ caLifetimeInDays = expireDays
+ }
+
+ if caLifetimeInDays > DefaultCACertificateLifetimeInDays {
+ warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays)
+ }
+
+ caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour
+ return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime)
+}
+
+// MakeSelfSignedCAConfigForDuration is like MakeSelfSignedCAConfig but takes an
+// explicit lifetime duration.
+func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) {
+ subject := pkix.Name{CommonName: name}
+ return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime)
+}
+
+// makeSelfSignedCAConfigForSubjectAndDuration generates the key pair and
+// self-signs the CA certificate.
+func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, caLifetime time.Duration) (*TLSCertificateConfig, error) {
+ // Create CA cert
+ rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+ if err != nil {
+ return nil, err
+ }
+ // AuthorityKeyId and SubjectKeyId should match for a self-signed CA
+ authorityKeyId := publicKeyHash
+ subjectKeyId := publicKeyHash
+ rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, time.Now, authorityKeyId, subjectKeyId)
+ rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ caConfig := &TLSCertificateConfig{
+ Certs: []*x509.Certificate{rootcaCert},
+ Key: rootcaPrivateKey,
+ }
+ return caConfig, nil
+}
+
+// MakeCAConfigForDuration generates an intermediate CA cert/key signed by issuer;
+// the returned chain is the new cert followed by the issuer's chain.
+func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) {
+ // Create CA cert
+ signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+ if err != nil {
+ return nil, err
+ }
+ authorityKeyId := issuer.Config.Certs[0].SubjectKeyId
+ subjectKeyId := publicKeyHash
+ signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId)
+ signerCert, err := issuer.signCertificate(signerTemplate, signerPublicKey)
+ if err != nil {
+ return nil, err
+ }
+ signerConfig := &TLSCertificateConfig{
+ Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...),
+ Key: signerPrivateKey,
+ }
+ return signerConfig, nil
+}
+
+// EnsureSubCA returns a subCA signed by the `ca`, whether it was created
+// (as opposed to pre-existing), and any error that might occur during the subCA
+// creation.
+// If serialFile is an empty string, a RandomSerialGenerator will be used.
+func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) {
+ if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil {
+ return subCA, false, err
+ }
+ // any load error falls through to minting a fresh sub-CA
+ subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, expireDays)
+ return subCA, true, err
+}
+
+// MakeAndWriteSubCA returns a new sub-CA configuration. New cert/key pair is generated
+// while using this function.
+// If serialFile is an empty string, a RandomSerialGenerator will be used.
+func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) {
+ klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile)
+
+ subCAConfig, err := MakeCAConfigForDuration(name, time.Duration(expireDays)*time.Hour*24, ca)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := subCAConfig.WriteCertConfigFile(certFile, keyFile); err != nil {
+ return nil, err
+ }
+
+ var serialGenerator SerialGenerator
+ if len(serialFile) > 0 {
+ // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file)
+ if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil {
+ return nil, err
+ }
+
+ serialGenerator, err = NewSerialFileGenerator(serialFile)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ serialGenerator = &RandomSerialGenerator{}
+ }
+
+ return &CA{
+ Config: subCAConfig,
+ SerialGenerator: serialGenerator,
+ }, nil
+}
+
+// EnsureServerCert loads an existing server cert covering all hostnames, or
+// generates and writes a new one; the bool reports whether files were written.
+func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, bool, error) {
+ certConfig, err := GetServerCert(certFile, keyFile, hostnames)
+ if err != nil {
+ certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays)
+ return certConfig, true, err
+ }
+
+ return certConfig, false, nil
+}
+
+// GetServerCert loads a server cert/key pair and verifies its SANs cover all of
+// the requested hostnames and IP addresses.
+func GetServerCert(certFile, keyFile string, hostnames sets.String) (*TLSCertificateConfig, error) {
+ server, err := GetTLSCertificateConfig(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ cert := server.Certs[0]
+ ips, dns := IPAddressesDNSNames(hostnames.List())
+ missingIps := ipsNotInSlice(ips, cert.IPAddresses)
+ missingDns := stringsNotInSlice(dns, cert.DNSNames)
+ if len(missingIps) == 0 && len(missingDns) == 0 {
+ klog.V(4).Infof("Found existing server certificate in %s", certFile)
+ return server, nil
+ }
+
+ return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps)
+}
+
+// MakeAndWriteServerCert generates a server cert for the hostnames and writes it
+// plus its key to disk.
+func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, error) {
+ klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile)
+
+ server, err := ca.MakeServerCert(hostnames, expireDays)
+ if err != nil {
+ return nil, err
+ }
+ if err := server.WriteCertConfigFile(certFile, keyFile); err != nil {
+ return server, err
+ }
+ return server, nil
+}
+
+// CertificateExtensionFunc is passed a certificate that it may extend, or return an error
+// if the extension attempt failed.
+type CertificateExtensionFunc func(*x509.Certificate) error
+
+// MakeServerCert generates a server cert for the hostnames (first hostname
+// becomes the CN), applying any extension funcs before signing.
+func (ca *CA) MakeServerCert(hostnames sets.String, expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) {
+ // NOTE(review): the error from newKeyPairWithHash is discarded here;
+ // a key-generation failure would surface later as a signing error. Upstream vendored behavior.
+ serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash()
+ authorityKeyId := ca.Config.Certs[0].SubjectKeyId
+ subjectKeyId := publicKeyHash
+ serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), expireDays, time.Now, authorityKeyId, subjectKeyId)
+ for _, fn := range fns {
+ if err := fn(serverTemplate); err != nil {
+ return nil, err
+ }
+ }
+ serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey)
+ if err != nil {
+ return nil, err
+ }
+ server := &TLSCertificateConfig{
+ Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
+ Key: serverPrivateKey,
+ }
+ return server, nil
+}
+
+// MakeServerCertForDuration is like MakeServerCert but takes an explicit lifetime duration.
+func (ca *CA) MakeServerCertForDuration(hostnames sets.String, lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) {
+ // NOTE(review): key-generation error discarded, as in MakeServerCert. Upstream vendored behavior.
+ serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash()
+ authorityKeyId := ca.Config.Certs[0].SubjectKeyId
+ subjectKeyId := publicKeyHash
+ serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), lifetime, time.Now, authorityKeyId, subjectKeyId)
+ for _, fn := range fns {
+ if err := fn(serverTemplate); err != nil {
+ return nil, err
+ }
+ }
+ serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey)
+ if err != nil {
+ return nil, err
+ }
+ server := &TLSCertificateConfig{
+ Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
+ Key: serverPrivateKey,
+ }
+ return server, nil
+}
+
+// EnsureClientCertificate loads an existing client cert for the user, or
+// generates and writes a new one; the bool reports whether files were written.
+func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) {
+ certConfig, err := GetClientCertificate(certFile, keyFile, u)
+ if err != nil {
+ certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays)
+ return certConfig, true, err // true indicates we wrote the files.
+ }
+ return certConfig, false, nil
+}
+
+// GetClientCertificate loads a client cert/key pair and verifies its subject
+// matches the given user (common name, UID, groups).
+func GetClientCertificate(certFile, keyFile string, u user.Info) (*TLSCertificateConfig, error) {
+ certConfig, err := GetTLSCertificateConfig(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ if subject := certConfig.Certs[0].Subject; subjectChanged(subject, userToSubject(u)) {
+ return nil, fmt.Errorf("existing client certificate in %s was issued for a different Subject (%s)",
+ certFile, subject)
+ }
+
+ return certConfig, nil
+}
+
+// subjectChanged reports whether two subjects differ in CN, serial number or
+// (order-insensitive) Organization.
+// NOTE(review): sorts the Organization slices in place, mutating both arguments' backing arrays.
+func subjectChanged(existing, expected pkix.Name) bool {
+ sort.Strings(existing.Organization)
+ sort.Strings(expected.Organization)
+
+ return existing.CommonName != expected.CommonName ||
+ existing.SerialNumber != expected.SerialNumber ||
+ !reflect.DeepEqual(existing.Organization, expected.Organization)
+}
+
+// MakeClientCertificate generates a client cert for the user, writes cert (0644)
+// and key (0600) to disk, and returns the parsed result.
+func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) {
+ klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile)
+ // ensure parent dirs
+ if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil {
+ return nil, err
+ }
+ if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil {
+ return nil, err
+ }
+
+ // NOTE(review): the error from NewKeyPair is discarded here. Upstream vendored behavior.
+ clientPublicKey, clientPrivateKey, _ := NewKeyPair()
+ clientTemplate := newClientCertificateTemplate(userToSubject(u), expireDays, time.Now)
+ clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ certData, err := EncodeCertificates(clientCrt)
+ if err != nil {
+ return nil, err
+ }
+ keyData, err := encodeKey(clientPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = os.WriteFile(certFile, certData, os.FileMode(0644)); err != nil {
+ return nil, err
+ }
+ if err = os.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil {
+ return nil, err
+ }
+
+ return GetTLSCertificateConfig(certFile, keyFile)
+}
+
+// MakeClientCertificateForDuration is like MakeClientCertificate but takes an
+// explicit lifetime and returns the config without writing files.
+func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) {
+ // NOTE(review): the error from NewKeyPair is discarded here. Upstream vendored behavior.
+ clientPublicKey, clientPrivateKey, _ := NewKeyPair()
+ clientTemplate := newClientCertificateTemplateForDuration(userToSubject(u), lifetime, time.Now)
+ clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ certData, err := EncodeCertificates(clientCrt)
+ if err != nil {
+ return nil, err
+ }
+ keyData, err := encodeKey(clientPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return GetTLSCertificateConfigFromBytes(certData, keyData)
+}
+
+type sortedForDER []string
+
+func (s sortedForDER) Len() int {
+ return len(s)
+}
+func (s sortedForDER) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s sortedForDER) Less(i, j int) bool {
+ l1 := len(s[i])
+ l2 := len(s[j])
+ if l1 == l2 {
+ return s[i] < s[j]
+ }
+ return l1 < l2
+}
+
+func userToSubject(u user.Info) pkix.Name {
+ // Ok we are going to order groups in a peculiar way here to workaround a
+ // 2 bugs, 1 in golang (https://github.com/golang/go/issues/24254) which
+ // incorrectly encodes Multivalued RDNs and another in GNUTLS clients
+ // which are too picky (https://gitlab.com/gnutls/gnutls/issues/403)
+ // and try to "correct" this issue when reading client certs.
+ //
+ // This workaround should be killed once Golang's pkix module is fixed to
+ // generate a correct DER encoding.
+ //
+	// The workaround relies on the fact that the first octet that differs
+ // between the encoding of two group RDNs will end up being the encoded
+ // length which is directly related to the group name's length. So we'll
+ // sort such that shortest names come first.
+ ugroups := u.GetGroups()
+ groups := make([]string, len(ugroups))
+ copy(groups, ugroups)
+ sort.Sort(sortedForDER(groups))
+
+ return pkix.Name{
+ CommonName: u.GetName(),
+ SerialNumber: u.GetUID(),
+ Organization: groups,
+ }
+}
+
+func (ca *CA) signCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) {
+ // Increment and persist serial
+ serial, err := ca.SerialGenerator.Next(template)
+ if err != nil {
+ return nil, err
+ }
+ template.SerialNumber = big.NewInt(serial)
+ return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key)
+}
+
+func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) {
+ return newRSAKeyPair()
+}
+
+func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) {
+ publicKey, privateKey, err := newRSAKeyPair()
+ var publicKeyHash []byte
+ if err == nil {
+ hash := sha1.New()
+ hash.Write(publicKey.N.Bytes())
+ publicKeyHash = hash.Sum(nil)
+ }
+ return publicKey, privateKey, publicKeyHash, err
+}
+
+func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) {
+ privateKey, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &privateKey.PublicKey, privateKey, nil
+}
+
+// Can be used for CA or intermediate signing certs
+func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate {
+ return &x509.Certificate{
+ Subject: subject,
+
+ SignatureAlgorithm: x509.SHA256WithRSA,
+
+ NotBefore: currentTime().Add(-1 * time.Second),
+ NotAfter: currentTime().Add(caLifetime),
+
+ // Specify a random serial number to avoid the same issuer+serial
+ // number referring to different certs in a chain of trust if the
+ // signing certificate is ever rotated.
+ SerialNumber: big.NewInt(randomSerialNumber()),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+
+ AuthorityKeyId: authorityKeyId,
+ SubjectKeyId: subjectKeyId,
+ }
+}
+
+// Can be used for ListenAndServeTLS
+func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate {
+ var lifetimeInDays = DefaultCertificateLifetimeInDays
+ if expireDays > 0 {
+ lifetimeInDays = expireDays
+ }
+
+ if lifetimeInDays > DefaultCertificateLifetimeInDays {
+ warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays)
+ }
+
+ lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour
+
+ return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId)
+}
+
+// Can be used for ListenAndServeTLS
+func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate {
+ template := &x509.Certificate{
+ Subject: subject,
+
+ SignatureAlgorithm: x509.SHA256WithRSA,
+
+ NotBefore: currentTime().Add(-1 * time.Second),
+ NotAfter: currentTime().Add(lifetime),
+ SerialNumber: big.NewInt(1),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+
+ AuthorityKeyId: authorityKeyId,
+ SubjectKeyId: subjectKeyId,
+ }
+
+ template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts)
+
+ return template
+}
+
+func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) {
+ ips := []net.IP{}
+ dns := []string{}
+ for _, host := range hosts {
+ if ip := net.ParseIP(host); ip != nil {
+ ips = append(ips, ip)
+ } else {
+ dns = append(dns, host)
+ }
+ }
+
+ // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries
+ // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766
+ for _, ip := range ips {
+ dns = append(dns, ip.String())
+ }
+
+ return ips, dns
+}
+
+func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+ ok := false
+ certs := []*x509.Certificate{}
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return certs, err
+ }
+
+ certs = append(certs, cert)
+ ok = true
+ }
+
+ if !ok {
+ return certs, errors.New("Could not read any certificates")
+ }
+ return certs, nil
+}
+
+// Can be used as a certificate in http.Transport TLSClientConfig
+func newClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate {
+ var lifetimeInDays = DefaultCertificateLifetimeInDays
+ if expireDays > 0 {
+ lifetimeInDays = expireDays
+ }
+
+ if lifetimeInDays > DefaultCertificateLifetimeInDays {
+ warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays)
+ }
+
+ lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour
+
+ return newClientCertificateTemplateForDuration(subject, lifetime, currentTime)
+}
+
+// Can be used as a certificate in http.Transport TLSClientConfig
+func newClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate {
+ return &x509.Certificate{
+ Subject: subject,
+
+ SignatureAlgorithm: x509.SHA256WithRSA,
+
+ NotBefore: currentTime().Add(-1 * time.Second),
+ NotAfter: currentTime().Add(lifetime),
+ SerialNumber: big.NewInt(1),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ BasicConstraintsValid: true,
+ }
+}
+
+func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) {
+ defaultLifetimeInYears := defaultLifetimeInDays / 365
+ fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears)
+ fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!")
+}
+
+func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) {
+ derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey)
+ if err != nil {
+ return nil, err
+ }
+ certs, err := x509.ParseCertificates(derBytes)
+ if err != nil {
+ return nil, err
+ }
+ if len(certs) != 1 {
+ return nil, errors.New("Expected a single certificate")
+ }
+ return certs[0], nil
+}
+
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+ b := bytes.Buffer{}
+ for _, cert := range certs {
+ if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
+ return []byte{}, err
+ }
+ }
+ return b.Bytes(), nil
+}
+func encodeKey(key crypto.PrivateKey) ([]byte, error) {
+ b := bytes.Buffer{}
+ switch key := key.(type) {
+ case *ecdsa.PrivateKey:
+ keyBytes, err := x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return []byte{}, err
+ }
+ if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+ return b.Bytes(), err
+ }
+ case *rsa.PrivateKey:
+ if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil {
+ return []byte{}, err
+ }
+ default:
+ return []byte{}, errors.New("Unrecognized key type")
+
+ }
+ return b.Bytes(), nil
+}
+
+func writeCertificates(f io.Writer, certs ...*x509.Certificate) error {
+ bytes, err := EncodeCertificates(certs...)
+ if err != nil {
+ return err
+ }
+ if _, err := f.Write(bytes); err != nil {
+ return err
+ }
+
+ return nil
+}
+func writeKeyFile(f io.Writer, key crypto.PrivateKey) error {
+ bytes, err := encodeKey(key)
+ if err != nil {
+ return err
+ }
+ if _, err := f.Write(bytes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func stringsNotInSlice(needles []string, haystack []string) []string {
+ missing := []string{}
+ for _, needle := range needles {
+ if !stringInSlice(needle, haystack) {
+ missing = append(missing, needle)
+ }
+ }
+ return missing
+}
+
+func stringInSlice(needle string, haystack []string) bool {
+ for _, straw := range haystack {
+ if needle == straw {
+ return true
+ }
+ }
+ return false
+}
+
+func ipsNotInSlice(needles []net.IP, haystack []net.IP) []net.IP {
+ missing := []net.IP{}
+ for _, needle := range needles {
+ if !ipInSlice(needle, haystack) {
+ missing = append(missing, needle)
+ }
+ }
+ return missing
+}
+
+func ipInSlice(needle net.IP, haystack []net.IP) bool {
+ for _, straw := range haystack {
+ if needle.Equal(straw) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
new file mode 100644
index 0000000000..0aa127037c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
@@ -0,0 +1,20 @@
+package crypto
+
+import (
+ "crypto/x509"
+ "time"
+)
+
+// FilterExpiredCerts checks whether the certificates in the bundle are valid, i.e. they have not expired.
+// The function returns a new bundle containing only the valid certificates; it may be empty if none are valid.
+func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate {
+ currentTime := time.Now()
+ var validCerts []*x509.Certificate
+ for _, c := range certs {
+ if c.NotAfter.After(currentTime) {
+ validCerts = append(validCerts, c)
+ }
+ }
+
+ return validCerts
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
new file mode 100644
index 0000000000..1a522609a5
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
@@ -0,0 +1,72 @@
+package condition
+
+const (
+	// ManagementStateDegradedConditionType is true when the operator ManagementState is not "Managed".
+ // Possible reasons are Unmanaged, Removed or Unknown. Any of these cases means the operator is not actively managing the operand.
+ // This condition is set to false when the ManagementState is set to back to "Managed".
+ ManagementStateDegradedConditionType = "ManagementStateDegraded"
+
+ // UnsupportedConfigOverridesUpgradeableConditionType is true when operator unsupported config overrides is changed.
+ // When NoUnsupportedConfigOverrides reason is given it means there are no unsupported config overrides.
+ // When UnsupportedConfigOverridesSet reason is given it means the unsupported config overrides are set, which might impact the ability
+ // of operator to successfully upgrade its operand.
+ UnsupportedConfigOverridesUpgradeableConditionType = "UnsupportedConfigOverridesUpgradeable"
+
+ // MonitoringResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the ServiceMonitor
+ // CR resource, which is required by monitoring operator to collect Prometheus data from the operator. When this condition is true and the ServiceMonitor
+ // is already created, it won't have impact on collecting metrics. However, if the ServiceMonitor was not created, the metrics won't be available for
+ // collection until this condition is set to false.
+ // The condition is set to false automatically when the operator successfully synchronize the ServiceMonitor resource.
+ MonitoringResourceControllerDegradedConditionType = "MonitoringResourceControllerDegraded"
+
+ // BackingResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the resources needed
+ // to successfully run the installer pods (installer CRB and SA). If these were already created, this condition is not fatal, however if the resources
+ // were not created it means the installer pod creation will fail.
+ // This condition is set to false when the operator can successfully synchronize installer SA and CRB.
+ BackingResourceControllerDegradedConditionType = "BackingResourceControllerDegraded"
+
+ // StaticPodsDegradedConditionType is true when the operator observe errors when installing the new revision static pods.
+	// This condition reports the Error reason when the pods are terminated, not ready, or waiting, during which the operand quality of service is degraded.
+ // This condition is set to False when the pods change state to running and are observed ready.
+ StaticPodsDegradedConditionType = "StaticPodsDegraded"
+
+ // StaticPodsAvailableConditionType is true when the static pod is available on at least one node.
+ StaticPodsAvailableConditionType = "StaticPodsAvailable"
+
+ // ConfigObservationDegradedConditionType is true when the operator failed to observe or process configuration change.
+ // This is not transient condition and normally a correction or manual intervention is required on the config custom resource.
+ ConfigObservationDegradedConditionType = "ConfigObservationDegraded"
+
+ // ResourceSyncControllerDegradedConditionType is true when the operator failed to synchronize one or more secrets or config maps required
+ // to run the operand. Operand ability to provide service might be affected by this condition.
+ // This condition is set to false when the operator is able to create secrets and config maps.
+ ResourceSyncControllerDegradedConditionType = "ResourceSyncControllerDegraded"
+
+ // CertRotationDegradedConditionTypeFmt is true when the operator failed to properly rotate one or more certificates required by the operand.
+ // The RotationError reason is given with message describing details of this failure. This condition can be fatal when ignored as the existing certificate(s)
+ // validity can expire and without rotating/renewing them manual recovery might be required to fix the cluster.
+ CertRotationDegradedConditionTypeFmt = "CertRotation_%s_Degraded"
+
+ // InstallerControllerDegradedConditionType is true when the operator is not able to create new installer pods so the new revisions
+ // cannot be rolled out. This might happen when one or more required secrets or config maps does not exists.
+ // In case the missing secret or config map is available, this condition is automatically set to false.
+ InstallerControllerDegradedConditionType = "InstallerControllerDegraded"
+
+ // NodeInstallerDegradedConditionType is true when the operator is not able to create new installer pods because there are no schedulable nodes
+ // available to run the installer pods.
+ // The AllNodesAtLatestRevision reason is set when all master nodes are updated to the latest revision. It is false when some masters are pending revision.
+ // ZeroNodesActive reason is set to True when no active master nodes are observed. Is set to False when there is at least one active master node.
+ NodeInstallerDegradedConditionType = "NodeInstallerDegraded"
+
+ // NodeInstallerProgressingConditionType is true when the operator is moving nodes to a new revision.
+ NodeInstallerProgressingConditionType = "NodeInstallerProgressing"
+
+ // RevisionControllerDegradedConditionType is true when the operator is not able to create new desired revision because an error occurred when
+ // the operator attempted to created required resource(s) (secrets, configmaps, ...).
+ // This condition mean no new revision will be created.
+ RevisionControllerDegradedConditionType = "RevisionControllerDegraded"
+
+ // NodeControllerDegradedConditionType is true when the operator observed a master node that is not ready.
+ // Note that a node is not ready when its Condition.NodeReady wasn't set to true
+ NodeControllerDegradedConditionType = "NodeControllerDegraded"
+)
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/OWNERS
new file mode 100644
index 0000000000..582d671017
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/OWNERS
@@ -0,0 +1,8 @@
+reviewers:
+ - tkashem
+ - p0lyn0mial
+ - sttts
+approvers:
+ - tkashem
+ - p0lyn0mial
+ - sttts
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/listers.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/listers.go
new file mode 100644
index 0000000000..2c0d1bda3e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/listers.go
@@ -0,0 +1,9 @@
+package apiserver
+
+import (
+ configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
+)
+
+type APIServerLister interface {
+ APIServerLister() configlistersv1.APIServerLister
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_audit.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_audit.go
new file mode 100644
index 0000000000..39aa79cb62
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_audit.go
@@ -0,0 +1,88 @@
+package apiserver
+
+import (
+ "fmt"
+
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/klog/v2"
+
+ "github.com/openshift/library-go/pkg/operator/configobserver"
+ "github.com/openshift/library-go/pkg/operator/events"
+)
+
+// AuditPolicyPathGetterFunc allows the observer to be agnostic of the source of audit profile(s).
+// The function returns the path to the audit policy file (associated with the
+// given profile) in the static manifest folder.
+type AuditPolicyPathGetterFunc func(profile string) (string, error)
+
+// NewAuditObserver returns an ObserveConfigFunc that observes the audit field of the APIServer resource
+// and sets the apiServerArguments:audit-policy-file field for the apiserver appropriately.
+func NewAuditObserver(pathGetter AuditPolicyPathGetterFunc) configobserver.ObserveConfigFunc {
+ var (
+ apiServerArgumentsAuditPath = []string{"apiServerArguments", "audit-policy-file"}
+ )
+
+ return func(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observed map[string]interface{}, _ []error) {
+ defer func() {
+ observed = configobserver.Pruned(observed, apiServerArgumentsAuditPath)
+ }()
+
+ errs := []error{}
+
+ // if the function encounters an error it returns existing/current config, which means that
+ // some other entity (default config in bindata ) must ensure to default the configuration.
+ // otherwise, the apiserver won't have a path to audit policy file and it will fail to start.
+ listers := genericListers.(APIServerLister)
+ apiServer, err := listers.APIServerLister().Get("cluster")
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ klog.Warningf("apiserver.config.openshift.io/cluster: not found")
+
+ return existingConfig, errs
+ }
+
+ return existingConfig, append(errs, err)
+ }
+
+ desiredProfile := string(apiServer.Spec.Audit.Profile)
+ if len(desiredProfile) == 0 {
+ // The specified Profile is empty, so let the defaulting layer choose a default for us.
+ return map[string]interface{}{}, errs
+ }
+
+ desiredAuditPolicyPath, err := pathGetter(desiredProfile)
+ if err != nil {
+ return existingConfig, append(errs, fmt.Errorf("audit profile is not valid name=%s", desiredProfile))
+ }
+
+ currentAuditPolicyPath, err := getCurrentPolicyPath(existingConfig, apiServerArgumentsAuditPath...)
+ if err != nil {
+ return existingConfig, append(errs, fmt.Errorf("audit profile is not valid name=%s", desiredProfile))
+ }
+ if desiredAuditPolicyPath == currentAuditPolicyPath {
+ return existingConfig, errs
+ }
+
+ // we have a change of audit policy here!
+ observedConfig := map[string]interface{}{}
+ if err := unstructured.SetNestedStringSlice(observedConfig, []string{desiredAuditPolicyPath}, apiServerArgumentsAuditPath...); err != nil {
+ return existingConfig, append(errs, fmt.Errorf("failed to set desired audit profile in observed config name=%s", desiredProfile))
+ }
+
+ recorder.Eventf("ObserveAPIServerArgumentsAudit", "audit policy has been set to profile=%s", desiredProfile)
+ return observedConfig, errs
+ }
+}
+
+func getCurrentPolicyPath(existing map[string]interface{}, fields ...string) (string, error) {
+ current, _, err := unstructured.NestedStringSlice(existing, fields...)
+ if err != nil {
+ return "", err
+ }
+ if len(current) == 0 {
+ return "", nil
+ }
+
+ return current[0], nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_cors.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_cors.go
new file mode 100644
index 0000000000..9046e9534b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_cors.go
@@ -0,0 +1,75 @@
+package apiserver
+
+import (
+ "k8s.io/klog/v2"
+
+ "github.com/openshift/library-go/pkg/operator/configobserver"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+var clusterDefaultCORSAllowedOrigins = []string{
+ `//127\.0\.0\.1(:|$)`,
+ `//localhost(:|$)`,
+}
+
+// ObserveAdditionalCORSAllowedOrigins observes the additionalCORSAllowedOrigins field
+// of the APIServer resource and sets the corsAllowedOrigins field of observedConfig
+func ObserveAdditionalCORSAllowedOrigins(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+ return innerObserveAdditionalCORSAllowedOrigins(genericListers, recorder, existingConfig, []string{"corsAllowedOrigins"})
+}
+
+// ObserveAdditionalCORSAllowedOriginsToArguments observes the additionalCORSAllowedOrigins field
+// of the APIServer resource and sets the cors-allowed-origins field in observedConfig.apiServerArguments
+func ObserveAdditionalCORSAllowedOriginsToArguments(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+ return innerObserveAdditionalCORSAllowedOrigins(genericListers, recorder, existingConfig, []string{"apiServerArguments", "cors-allowed-origins"})
+}
+
+func innerObserveAdditionalCORSAllowedOrigins(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}, corsAllowedOriginsPath []string) (ret map[string]interface{}, _ []error) {
+ defer func() {
+ ret = configobserver.Pruned(ret, corsAllowedOriginsPath)
+ }()
+
+ lister := genericListers.(APIServerLister)
+ errs := []error{}
+ defaultConfig := map[string]interface{}{}
+ if err := unstructured.SetNestedStringSlice(defaultConfig, clusterDefaultCORSAllowedOrigins, corsAllowedOriginsPath...); err != nil {
+ // this should not happen
+ return existingConfig, append(errs, err)
+ }
+
+ // grab the current CORS origins to later check whether they were updated
+ currentCORSAllowedOrigins, _, err := unstructured.NestedStringSlice(existingConfig, corsAllowedOriginsPath...)
+ if err != nil {
+ errs = append(errs, err)
+ // keep going on read error from existing config
+ }
+ currentCORSSet := sets.NewString(currentCORSAllowedOrigins...)
+ currentCORSSet.Insert(clusterDefaultCORSAllowedOrigins...)
+
+ observedConfig := map[string]interface{}{}
+ apiServer, err := lister.APIServerLister().Get("cluster")
+ if errors.IsNotFound(err) {
+ klog.Warningf("apiserver.config.openshift.io/cluster: not found")
+ return defaultConfig, errs
+ }
+ if err != nil {
+ // return existingConfig here in case err is just a transient error so
+ // that we don't rewrite the config that was observed previously
+ return existingConfig, append(errs, err)
+ }
+
+ newCORSSet := sets.NewString(clusterDefaultCORSAllowedOrigins...)
+ newCORSSet.Insert(apiServer.Spec.AdditionalCORSAllowedOrigins...)
+ if err := unstructured.SetNestedStringSlice(observedConfig, newCORSSet.List(), corsAllowedOriginsPath...); err != nil {
+ return existingConfig, append(errs, err)
+ }
+
+ if !currentCORSSet.Equal(newCORSSet) {
+ recorder.Eventf("ObserveAdditionalCORSAllowedOrigins", "corsAllowedOrigins changed to %q", newCORSSet.List())
+ }
+
+ return observedConfig, errs
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go
new file mode 100644
index 0000000000..7bbab9e42c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go
@@ -0,0 +1,103 @@
+package apiserver
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/klog/v2"
+
+ configv1 "github.com/openshift/api/config/v1"
+ "github.com/openshift/library-go/pkg/crypto"
+ "github.com/openshift/library-go/pkg/operator/configobserver"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// ObserveTLSSecurityProfile observes APIServer.Spec.TLSSecurityProfile field and sets
+// the ServingInfo.MinTLSVersion, ServingInfo.CipherSuites fields of observed config
+func ObserveTLSSecurityProfile(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+ return innerTLSSecurityProfileObservations(genericListers, recorder, existingConfig, []string{"servingInfo", "minTLSVersion"}, []string{"servingInfo", "cipherSuites"})
+}
+
+// ObserveTLSSecurityProfileToArguments observes APIServer.Spec.TLSSecurityProfile field and sets
+// the tls-min-version and tls-cipher-suites fields of observedConfig.apiServerArguments
+func ObserveTLSSecurityProfileToArguments(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+ return innerTLSSecurityProfileObservations(genericListers, recorder, existingConfig, []string{"apiServerArguments", "tls-min-version"}, []string{"apiServerArguments", "tls-cipher-suites"})
+}
+
+func innerTLSSecurityProfileObservations(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}, minTLSVersionPath, cipherSuitesPath []string) (ret map[string]interface{}, _ []error) {
+ defer func() {
+ ret = configobserver.Pruned(ret, minTLSVersionPath, cipherSuitesPath)
+ }()
+
+ listers := genericListers.(APIServerLister)
+ errs := []error{}
+
+ currentMinTLSVersion, _, versionErr := unstructured.NestedString(existingConfig, minTLSVersionPath...)
+ if versionErr != nil {
+ errs = append(errs, fmt.Errorf("failed to retrieve spec.servingInfo.minTLSVersion: %v", versionErr))
+ // keep going on read error from existing config
+ }
+
+ currentCipherSuites, _, suitesErr := unstructured.NestedStringSlice(existingConfig, cipherSuitesPath...)
+ if suitesErr != nil {
+ errs = append(errs, fmt.Errorf("failed to retrieve spec.servingInfo.cipherSuites: %v", suitesErr))
+ // keep going on read error from existing config
+ }
+
+ apiServer, err := listers.APIServerLister().Get("cluster")
+ if errors.IsNotFound(err) {
+ klog.Warningf("apiserver.config.openshift.io/cluster: not found")
+ apiServer = &configv1.APIServer{}
+ } else if err != nil {
+ return existingConfig, append(errs, err)
+ }
+
+ observedConfig := map[string]interface{}{}
+ observedMinTLSVersion, observedCipherSuites := getSecurityProfileCiphers(apiServer.Spec.TLSSecurityProfile)
+ if err = unstructured.SetNestedField(observedConfig, observedMinTLSVersion, minTLSVersionPath...); err != nil {
+ return existingConfig, append(errs, err)
+ }
+ if err = unstructured.SetNestedStringSlice(observedConfig, observedCipherSuites, cipherSuitesPath...); err != nil {
+ return existingConfig, append(errs, err)
+ }
+
+ if observedMinTLSVersion != currentMinTLSVersion {
+ recorder.Eventf("ObserveTLSSecurityProfile", "minTLSVersion changed to %s", observedMinTLSVersion)
+ }
+ if !reflect.DeepEqual(observedCipherSuites, currentCipherSuites) {
+ recorder.Eventf("ObserveTLSSecurityProfile", "cipherSuites changed to %q", observedCipherSuites)
+ }
+
+ return observedConfig, errs
+}
+
+// Extracts the minimum TLS version and cipher suites from TLSSecurityProfile object,
+// Converts the ciphers to IANA names as supported by Kube ServingInfo config.
+// If profile is nil, returns config defined by the Intermediate TLS Profile
+func getSecurityProfileCiphers(profile *configv1.TLSSecurityProfile) (string, []string) {
+ var profileType configv1.TLSProfileType
+ if profile == nil {
+ profileType = configv1.TLSProfileIntermediateType
+ } else {
+ profileType = profile.Type
+ }
+
+ var profileSpec *configv1.TLSProfileSpec
+ if profileType == configv1.TLSProfileCustomType {
+ if profile.Custom != nil {
+ profileSpec = &profile.Custom.TLSProfileSpec
+ }
+ } else {
+ profileSpec = configv1.TLSProfiles[profileType]
+ }
+
+ // nothing found / custom type set but no actual custom spec
+ if profileSpec == nil {
+ profileSpec = configv1.TLSProfiles[configv1.TLSProfileIntermediateType]
+ }
+
+ // need to remap all Ciphers to their respective IANA names used by Go
+ return string(profileSpec.MinTLSVersion), crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go
new file mode 100644
index 0000000000..3b9f611808
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go
@@ -0,0 +1,284 @@
+package configobserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/imdario/mergo"
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/diff"
+ "k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/client-go/tools/cache"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+
+ "github.com/openshift/library-go/pkg/controller/factory"
+ "github.com/openshift/library-go/pkg/operator/condition"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/management"
+ "github.com/openshift/library-go/pkg/operator/resourcesynccontroller"
+ "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type
+type Listers interface {
+ // ResourceSyncer can be used to copy content from one namespace to another
+ ResourceSyncer() resourcesynccontroller.ResourceSyncer
+ PreRunHasSynced() []cache.InformerSynced
+}
+
+// ObserveConfigFunc observes configuration and returns the observedConfig. This function should not return an
+// observedConfig that would cause the service being managed by the operator to crash. For example, if a required
+// configuration key cannot be observed, consider reusing the configuration key's previous value. Errors that occur
+// while attempting to generate the observedConfig should be returned in the errs slice.
+type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error)
+
+type ConfigObserver struct {
+ // observers are called in an undefined order and their results are merged to
+ // determine the observed configuration.
+ observers []ObserveConfigFunc
+
+ operatorClient v1helpers.OperatorClient
+
+ // listers are used by config observers to retrieve necessary resources
+ listers Listers
+
+ nestedConfigPath []string
+ degradedConditionType string
+}
+
+func NewConfigObserver(
+ operatorClient v1helpers.OperatorClient,
+ eventRecorder events.Recorder,
+ listers Listers,
+ informers []factory.Informer,
+ observers ...ObserveConfigFunc,
+) factory.Controller {
+ return NewNestedConfigObserver(
+ operatorClient,
+ eventRecorder,
+ listers,
+ informers,
+ nil,
+ "",
+ observers...,
+ )
+}
+
+// NewNestedConfigObserver creates a config observer that watches changes to a nested field (nestedConfigPath) in the config.
+// Useful when the config is shared across multiple controllers in the same process.
+//
+// Example:
+//
+// Given the following configuration, you could run two separate controllers and point each to its own section.
+// The first controller would be responsible for "oauthAPIServer" and the second for "oauthServer" section.
+//
+// "observedConfig": {
+// "oauthAPIServer": {
+// "apiServerArguments": {"tls-min-version": "VersionTLS12"}
+// },
+// "oauthServer": {
+// "corsAllowedOrigins": [ "//127\\.0\\.0\\.1(:|$)","//localhost(:|$)"]
+// }
+// }
+//
+// oauthAPIController := NewNestedConfigObserver(..., []string{"oauthAPIServer"}
+// oauthServerController := NewNestedConfigObserver(..., []string{"oauthServer"}
+func NewNestedConfigObserver(
+ operatorClient v1helpers.OperatorClient,
+ eventRecorder events.Recorder,
+ listers Listers,
+ informers []factory.Informer,
+ nestedConfigPath []string,
+ degradedConditionPrefix string,
+ observers ...ObserveConfigFunc,
+) factory.Controller {
+ c := &ConfigObserver{
+ operatorClient: operatorClient,
+ observers: observers,
+ listers: listers,
+ nestedConfigPath: nestedConfigPath,
+ degradedConditionType: degradedConditionPrefix + condition.ConfigObservationDegradedConditionType,
+ }
+
+ return factory.New().ResyncEvery(time.Minute).WithSync(c.sync).WithInformers(append(informers, listersToInformer(listers)...)...).ToController("ConfigObserver", eventRecorder.WithComponentSuffix("config-observer"))
+}
+
+// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This
+// must be information that is logically "owned" by another component.
+func (c ConfigObserver) sync(ctx context.Context, syncCtx factory.SyncContext) error {
+ originalSpec, _, _, err := c.operatorClient.GetOperatorState()
+ if management.IsOperatorRemovable() && apierrors.IsNotFound(err) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ spec := originalSpec.DeepCopy()
+
+ // don't worry about errors. If we can't decode, we'll simply stomp over the field.
+ existingConfig := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewBuffer(spec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil {
+ klog.V(4).Infof("decode of existing config failed with error: %v", err)
+ }
+
+ var errs []error
+ var observedConfigs []map[string]interface{}
+ for _, i := range rand.Perm(len(c.observers)) {
+ var currErrs []error
+ observedConfig, currErrs := c.observers[i](c.listers, syncCtx.Recorder(), existingConfig)
+ observedConfigs = append(observedConfigs, observedConfig)
+ errs = append(errs, currErrs...)
+ }
+
+ mergedObservedConfig := map[string]interface{}{}
+ for _, observedConfig := range observedConfigs {
+ if err := mergo.Merge(&mergedObservedConfig, observedConfig); err != nil {
+ klog.Warningf("merging observed config failed: %v", err)
+ }
+ }
+
+ reverseMergedObservedConfig := map[string]interface{}{}
+ for i := len(observedConfigs) - 1; i >= 0; i-- {
+ if err := mergo.Merge(&reverseMergedObservedConfig, observedConfigs[i]); err != nil {
+ klog.Warningf("merging observed config failed: %v", err)
+ }
+ }
+
+ if !equality.Semantic.DeepEqual(mergedObservedConfig, reverseMergedObservedConfig) {
+ errs = append(errs, errors.New("non-deterministic config observation detected"))
+ }
+
+ if err := c.updateObservedConfig(ctx, syncCtx, existingConfig, mergedObservedConfig); err != nil {
+ errs = []error{err}
+ }
+ configError := v1helpers.NewMultiLineAggregate(errs)
+
+ // update failing condition
+ cond := operatorv1.OperatorCondition{
+ Type: c.degradedConditionType,
+ Status: operatorv1.ConditionFalse,
+ }
+ if configError != nil {
+ cond.Status = operatorv1.ConditionTrue
+ cond.Reason = "Error"
+ cond.Message = configError.Error()
+ }
+ if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil {
+ return updateError
+ }
+
+ return configError
+}
+
+func (c ConfigObserver) updateObservedConfig(ctx context.Context, syncCtx factory.SyncContext, existingConfig map[string]interface{}, mergedObservedConfig map[string]interface{}) error {
+ if len(c.nestedConfigPath) == 0 {
+ if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) {
+ syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig))
+ return c.updateConfig(ctx, syncCtx, mergedObservedConfig, v1helpers.UpdateObservedConfigFn)
+ }
+ return nil
+ }
+
+ existingConfigNested, _, err := unstructured.NestedMap(existingConfig, c.nestedConfigPath...)
+ if err != nil {
+ return fmt.Errorf("unable to extract the config under %v key, err %v", c.nestedConfigPath, err)
+ }
+ mergedObservedConfigNested, _, err := unstructured.NestedMap(mergedObservedConfig, c.nestedConfigPath...)
+ if err != nil {
+ return fmt.Errorf("unable to extract the merged config under %v, err %v", c.nestedConfigPath, err)
+ }
+ if !equality.Semantic.DeepEqual(existingConfigNested, mergedObservedConfigNested) {
+ syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated section (%q) of observed config: %q", strings.Join(c.nestedConfigPath, "/"), diff.ObjectDiff(existingConfigNested, mergedObservedConfigNested))
+ return c.updateConfig(ctx, syncCtx, mergedObservedConfigNested, c.updateNestedConfigHelper)
+ }
+ return nil
+}
+
+type updateObservedConfigFn func(config map[string]interface{}) v1helpers.UpdateOperatorSpecFunc
+
+func (c ConfigObserver) updateConfig(ctx context.Context, syncCtx factory.SyncContext, updatedMaybeNestedConfig map[string]interface{}, updateConfigHelper updateObservedConfigFn) error {
+ if _, _, err := v1helpers.UpdateSpec(ctx, c.operatorClient, updateConfigHelper(updatedMaybeNestedConfig)); err != nil {
+ // At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers
+ // but instead reset the errors and only report single error condition.
+ syncCtx.Recorder().Warningf("ObservedConfigWriteError", "Failed to write observed config: %v", err)
+ return fmt.Errorf("error writing updated observed config: %v", err)
+ }
+ return nil
+}
+
+// updateNestedConfigHelper returns a helper function for updating the nested config.
+func (c ConfigObserver) updateNestedConfigHelper(updatedNestedConfig map[string]interface{}) v1helpers.UpdateOperatorSpecFunc {
+ return func(currentSpec *operatorv1.OperatorSpec) error {
+ existingConfig := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewBuffer(currentSpec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil {
+ klog.V(4).Infof("decode of existing config failed with error: %v", err)
+ }
+ if err := unstructured.SetNestedField(existingConfig, updatedNestedConfig, c.nestedConfigPath...); err != nil {
+ return fmt.Errorf("unable to set the nested (%q) observed config: %v", strings.Join(c.nestedConfigPath, "/"), err)
+ }
+ currentSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: existingConfig}}
+ return nil
+ }
+}
+
+// listersToInformer converts the Listers interface to informer with empty AddEventHandler as we only care about synced caches in the Run.
+func listersToInformer(l Listers) []factory.Informer {
+ result := make([]factory.Informer, len(l.PreRunHasSynced()))
+ for i := range l.PreRunHasSynced() {
+ result[i] = &listerInformer{cacheSynced: l.PreRunHasSynced()[i]}
+ }
+ return result
+}
+
+type listerInformer struct {
+ cacheSynced cache.InformerSynced
+}
+
+func (l *listerInformer) AddEventHandler(cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) {
+ return nil, nil
+}
+
+func (l *listerInformer) HasSynced() bool {
+ return l.cacheSynced()
+}
+
+// WithPrefix adds a prefix to the path the input observer would otherwise observe into
+func WithPrefix(observer ObserveConfigFunc, prefix ...string) ObserveConfigFunc {
+ if len(prefix) == 0 {
+ return observer
+ }
+
+ return func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+ errs := []error{}
+
+ nestedExistingConfig, _, err := unstructured.NestedMap(existingConfig, prefix...)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ orig, observerErrs := observer(listers, recorder, nestedExistingConfig)
+ errs = append(errs, observerErrs...)
+
+ if orig == nil {
+ return nil, errs
+ }
+
+ ret := map[string]interface{}{}
+ if err := unstructured.SetNestedField(ret, orig, prefix...); err != nil {
+ errs = append(errs, err)
+ }
+ return ret, errs
+
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go
new file mode 100644
index 0000000000..27b92d0fad
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go
@@ -0,0 +1,45 @@
+package configobserver
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// Pruned returns the unstructured filtered by the given paths, i.e. everything
+// outside of them will be dropped. The returned data structure might overlap
+// with the input, but the input is not mutated. In case of error for a path,
+// that path is dropped.
+func Pruned(obj map[string]interface{}, pths ...[]string) map[string]interface{} {
+ if obj == nil || len(pths) == 0 {
+ return obj
+ }
+
+ ret := map[string]interface{}{}
+ if len(pths) == 1 {
+ x, found, err := unstructured.NestedFieldCopy(obj, pths[0]...)
+ if err != nil || !found {
+ return ret
+ }
+ unstructured.SetNestedField(ret, x, pths[0]...)
+ return ret
+ }
+
+ for i, p := range pths {
+ x, found, err := unstructured.NestedFieldCopy(obj, p...)
+ if err != nil {
+ continue
+ }
+ if !found {
+ continue
+ }
+ if i < len(pths)-1 {
+ // this might be overwritten by a later path
+ x = runtime.DeepCopyJSONValue(x)
+ }
+ if err := unstructured.SetNestedField(ret, x, p...); err != nil {
+ continue
+ }
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS
new file mode 100644
index 0000000000..4f189b7087
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS
@@ -0,0 +1,8 @@
+reviewers:
+ - mfojtik
+ - deads2k
+ - sttts
+approvers:
+ - mfojtik
+ - deads2k
+ - sttts
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go
new file mode 100644
index 0000000000..f513a90f3c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go
@@ -0,0 +1,238 @@
+package events
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/klog/v2"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// Recorder is a simple event recording interface.
+type Recorder interface {
+ Event(reason, message string)
+ Eventf(reason, messageFmt string, args ...interface{})
+ Warning(reason, message string)
+ Warningf(reason, messageFmt string, args ...interface{})
+
+ // ForComponent allows to fiddle the component name before sending the event to sink.
+ // Making more unique components will prevent the spam filter in upstream event sink from dropping
+ // events.
+ ForComponent(componentName string) Recorder
+
+ // WithComponentSuffix is similar to ForComponent except it just suffix the current component name instead of overriding.
+ WithComponentSuffix(componentNameSuffix string) Recorder
+
+ // WithContext allows to set a context for event create API calls.
+ WithContext(ctx context.Context) Recorder
+
+ // ComponentName returns the current source component name for the event.
+ // This allows to suffix the original component name with 'sub-component'.
+ ComponentName() string
+
+ Shutdown()
+}
+
+// podNameEnv is the name of the environment variable inside the container that specifies the name of the current pod.
+// This pod name is then used to resolve the source/involved object for operator events.
+const podNameEnv = "POD_NAME"
+
+// podNameEnvFunc allows to override the way we get the environment variable value (for unit tests).
+var podNameEnvFunc = func() string {
+ return os.Getenv(podNameEnv)
+}
+
+// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs.
+// The pod name must be provided via the POD_NAME environment variable.
+// Even if this method returns an error, it always returns a valid reference to the namespace. This allows callers to control the logging
+// and decide whether to fail or accept the namespace.
+func GetControllerReferenceForCurrentPod(ctx context.Context, client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) {
+ if reference == nil {
+ // Try to get the pod name via POD_NAME environment variable
+ reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace}
+ if len(reference.Name) != 0 {
+ return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference)
+ }
+ // If that fails, lets try to guess the pod by listing all pods in namespaces and using the first pod in the list
+ reference, err := guessControllerReferenceForNamespace(ctx, client.CoreV1().Pods(targetNamespace))
+ if err != nil {
+ // If this fails, do not give up with error but instead use the namespace as controller reference for the pod
+ // NOTE: This is last resort, if we see this often it might indicate something is wrong in the cluster.
+ // In some cases this might help with flakes.
+ return getControllerReferenceForNamespace(targetNamespace), err
+ }
+ return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference)
+ }
+
+ switch reference.Kind {
+ case "Pod":
+ pod, err := client.CoreV1().Pods(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{})
+ if err != nil {
+ return getControllerReferenceForNamespace(reference.Namespace), err
+ }
+ if podController := metav1.GetControllerOf(pod); podController != nil {
+ return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(podController, targetNamespace))
+ }
+ // This is a bare pod without any ownerReference
+ return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil
+ case "ReplicaSet":
+ rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{})
+ if err != nil {
+ return getControllerReferenceForNamespace(reference.Namespace), err
+ }
+ if rsController := metav1.GetControllerOf(rs); rsController != nil {
+ return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(rsController, targetNamespace))
+ }
+ // This is a replicaSet without any ownerReference
+ return reference, nil
+ default:
+ return reference, nil
+ }
+}
+
+// getControllerReferenceForNamespace returns an object reference to the given namespace.
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ Kind: "Namespace",
+ Namespace: targetNamespace,
+ Name: targetNamespace,
+ APIVersion: "v1",
+ }
+}
+
+// makeObjectReference makes object reference from ownerReference and target namespace
+func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ Kind: owner.Kind,
+ Namespace: targetNamespace,
+ Name: owner.Name,
+ UID: owner.UID,
+ APIVersion: owner.APIVersion,
+ }
+}
+
+// guessControllerReferenceForNamespace tries to guess what resource to reference.
+func guessControllerReferenceForNamespace(ctx context.Context, client corev1client.PodInterface) (*corev1.ObjectReference, error) {
+ pods, err := client.List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if len(pods.Items) == 0 {
+ return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv)
+ }
+
+ for _, pod := range pods.Items {
+ ownerRef := metav1.GetControllerOf(&pod)
+ if ownerRef == nil {
+ continue
+ }
+ return &corev1.ObjectReference{
+ Kind: ownerRef.Kind,
+ Namespace: pod.Namespace,
+ Name: ownerRef.Name,
+ UID: ownerRef.UID,
+ APIVersion: ownerRef.APIVersion,
+ }, nil
+ }
+ return nil, errors.New("can't guess controller ref")
+}
+
+// NewRecorder returns new event recorder.
+func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+ return &recorder{
+ eventClient: client,
+ involvedObjectRef: involvedObjectRef,
+ sourceComponent: sourceComponentName,
+ }
+}
+
+// recorder is an implementation of Recorder interface.
+type recorder struct {
+ eventClient corev1client.EventInterface
+ involvedObjectRef *corev1.ObjectReference
+ sourceComponent string
+
+ // TODO: This is not the right way to pass the context, but there is no other way without breaking event interface
+ ctx context.Context
+}
+
+func (r *recorder) ComponentName() string {
+ return r.sourceComponent
+}
+
+func (r *recorder) Shutdown() {}
+
+func (r *recorder) ForComponent(componentName string) Recorder {
+ newRecorderForComponent := *r
+ newRecorderForComponent.sourceComponent = componentName
+ return &newRecorderForComponent
+}
+
+func (r *recorder) WithContext(ctx context.Context) Recorder {
+ r.ctx = ctx
+ return r
+}
+
+func (r *recorder) WithComponentSuffix(suffix string) Recorder {
+ return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Eventf emits the normal type event and allows formatting of the message.
+func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) {
+ r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits the warning type event and allows formatting of the message.
+func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) {
+ r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Event emits the normal type event.
+func (r *recorder) Event(reason, message string) {
+ event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message)
+ ctx := context.Background()
+ if r.ctx != nil {
+ ctx = r.ctx
+ }
+ if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil {
+ klog.Warningf("Error creating event %+v: %v", event, err)
+ }
+}
+
+// Warning emits the warning type event.
+func (r *recorder) Warning(reason, message string) {
+ event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message)
+ ctx := context.Background()
+ if r.ctx != nil {
+ ctx = r.ctx
+ }
+ if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil {
+ klog.Warningf("Error creating event %+v: %v", event, err)
+ }
+}
+
+func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event {
+ currentTime := metav1.Time{Time: time.Now()}
+ event := &corev1.Event{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()),
+ Namespace: involvedObjRef.Namespace,
+ },
+ InvolvedObject: *involvedObjRef,
+ Reason: reason,
+ Message: message,
+ Type: eventType,
+ Count: 1,
+ FirstTimestamp: currentTime,
+ LastTimestamp: currentTime,
+ }
+ event.Source.Component = sourceComponent
+ return event
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
new file mode 100644
index 0000000000..75efe3e192
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go
@@ -0,0 +1,86 @@
+package events
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog/v2"
+)
+
+type inMemoryEventRecorder struct {
+ events []*corev1.Event
+ source string
+ ctx context.Context
+ sync.Mutex
+}
+
+// inMemoryDummyObjectReference is used for fake events.
+var inMemoryDummyObjectReference = corev1.ObjectReference{
+ Kind: "Pod",
+ Namespace: "dummy",
+ Name: "dummy",
+ APIVersion: "v1",
+}
+
+type InMemoryRecorder interface {
+ Events() []*corev1.Event
+ Recorder
+}
+
+// NewInMemoryRecorder provides event recorder that stores all events recorded in memory and allow to replay them using the Events() method.
+// This recorder should be only used in unit tests.
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder {
+ return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent}
+}
+
+func (r *inMemoryEventRecorder) ComponentName() string {
+ return r.source
+}
+
+func (r *inMemoryEventRecorder) Shutdown() {}
+
+func (r *inMemoryEventRecorder) ForComponent(component string) Recorder {
+ r.Lock()
+ defer r.Unlock()
+ r.source = component
+ return r
+}
+
+func (r *inMemoryEventRecorder) WithContext(ctx context.Context) Recorder {
+ r.ctx = ctx
+ return r
+}
+
+func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder {
+ return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+// Events returns list of recorded events
+func (r *inMemoryEventRecorder) Events() []*corev1.Event {
+ return r.events
+}
+
+func (r *inMemoryEventRecorder) Event(reason, message string) {
+ r.Lock()
+ defer r.Unlock()
+ event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message)
+ r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+ r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *inMemoryEventRecorder) Warning(reason, message string) {
+ r.Lock()
+ defer r.Unlock()
+ event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message)
+ klog.Info(event.String())
+ r.events = append(r.events, event)
+}
+
+func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+ r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
new file mode 100644
index 0000000000..90639f2d93
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go
@@ -0,0 +1,58 @@
+package events
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog/v2"
+)
+
+type LoggingEventRecorder struct {
+ component string
+ ctx context.Context
+}
+
+func (r *LoggingEventRecorder) WithContext(ctx context.Context) Recorder {
+ r.ctx = ctx
+ return r
+}
+
+// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog.
+func NewLoggingEventRecorder(component string) Recorder {
+ return &LoggingEventRecorder{component: component}
+}
+
+func (r *LoggingEventRecorder) ComponentName() string {
+ return r.component
+}
+
+func (r *LoggingEventRecorder) ForComponent(component string) Recorder {
+ newRecorder := *r
+ newRecorder.component = component
+ return &newRecorder
+}
+
+func (r *LoggingEventRecorder) Shutdown() {}
+
+func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder {
+ return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *LoggingEventRecorder) Event(reason, message string) {
+ event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message)
+ klog.Info(event.String())
+}
+
+func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+ r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *LoggingEventRecorder) Warning(reason, message string) {
+ event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message)
+ klog.Warning(event.String())
+}
+
+func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+ r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
new file mode 100644
index 0000000000..0e41949a77
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go
@@ -0,0 +1,173 @@
+package events
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/component-base/metrics"
+ "k8s.io/component-base/metrics/legacyregistry"
+ "k8s.io/klog/v2"
+)
+
+// NewKubeRecorderWithOptions returns a new event recorder with tweaked correlator options.
+func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+ return (&upstreamRecorder{
+ client: client,
+ component: sourceComponentName,
+ involvedObjectRef: involvedObjectRef,
+ options: options,
+ fallbackRecorder: NewRecorder(client, sourceComponentName, involvedObjectRef),
+ }).ForComponent(sourceComponentName)
+}
+
+// NewKubeRecorder returns new event recorder with default correlator options.
+func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder {
+ return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef)
+}
+
+// upstreamRecorder is an implementation of Recorder interface.
+type upstreamRecorder struct {
+ client corev1client.EventInterface
+ clientCtx context.Context
+ component string
+ broadcaster record.EventBroadcaster
+ eventRecorder record.EventRecorder
+ involvedObjectRef *corev1.ObjectReference
+ options record.CorrelatorOptions
+
+ // shuttingDown indicates that the broadcaster for this recorder is being shut down
+ shuttingDown bool
+ shutdownMutex sync.RWMutex
+
+ // fallbackRecorder is used when the kube recorder is shutting down
+ // in that case we create the events directly.
+ fallbackRecorder Recorder
+}
+
+func (r *upstreamRecorder) WithContext(ctx context.Context) Recorder {
+ r.clientCtx = ctx
+ return r
+}
+
+// RecommendedClusterSingletonCorrelatorOptions provides recommended event correlator options for components that produce
+// many events (like operators).
+func RecommendedClusterSingletonCorrelatorOptions() record.CorrelatorOptions {
+ return record.CorrelatorOptions{
+ BurstSize: 60, // default: 25 (change allows a single source to send 60 events about an object per minute)
+ QPS: 1. / 1., // default: 1/300 (change allows refill rate to 1 new event every 1s)
+ KeyFunc: func(event *corev1.Event) (aggregateKey string, localKey string) {
+ return strings.Join([]string{
+ event.Source.Component,
+ event.Source.Host,
+ event.InvolvedObject.Kind,
+ event.InvolvedObject.Namespace,
+ event.InvolvedObject.Name,
+ string(event.InvolvedObject.UID),
+ event.InvolvedObject.APIVersion,
+ event.Type,
+ event.Reason,
+ // By default, KeyFunc doesn't use the message for aggregation; including it here keeps events with different messages but the same reason from being dropped as "similar events".
+ event.Message,
+ }, ""), event.Message
+ },
+ }
+}
+
+var eventsCounterMetric = metrics.NewCounterVec(&metrics.CounterOpts{
+ Subsystem: "event_recorder",
+ Name: "total_events_count",
+ Help: "Total count of events processed by this event recorder per involved object",
+ StabilityLevel: metrics.ALPHA,
+}, []string{"severity"})
+
+func init() {
+ (&sync.Once{}).Do(func() {
+ legacyregistry.MustRegister(eventsCounterMetric)
+ })
+}
+
+func (r *upstreamRecorder) ForComponent(componentName string) Recorder {
+ newRecorderForComponent := upstreamRecorder{
+ client: r.client,
+ fallbackRecorder: r.fallbackRecorder.WithComponentSuffix(componentName),
+ options: r.options,
+ involvedObjectRef: r.involvedObjectRef,
+ shuttingDown: r.shuttingDown,
+ }
+
+ // tweak the event correlator, so we don't lose important events.
+ broadcaster := record.NewBroadcasterWithCorrelatorOptions(r.options)
+ broadcaster.StartLogging(klog.Infof)
+ broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client})
+
+ newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName})
+ newRecorderForComponent.broadcaster = broadcaster
+ newRecorderForComponent.component = componentName
+
+ return &newRecorderForComponent
+}
+
+func (r *upstreamRecorder) Shutdown() {
+ r.shutdownMutex.Lock()
+ r.shuttingDown = true
+ r.shutdownMutex.Unlock()
+ // Wait for broadcaster to flush events (this is blocking)
+ // TODO: There is still race condition in upstream that might cause panic() on events recorded after the shutdown
+ // is called as the event recording is not-blocking (go routine based).
+ r.broadcaster.Shutdown()
+}
+
+func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder {
+ return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix))
+}
+
+func (r *upstreamRecorder) ComponentName() string {
+ return r.component
+}
+
+// Eventf emits the normal type event and allow formatting of message.
+func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) {
+ r.Event(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+// Warningf emits the warning type event and allow formatting of message.
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) {
+ r.Warning(reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (r *upstreamRecorder) incrementEventsCounter(severity string) {
+ if r.involvedObjectRef == nil {
+ return
+ }
+ eventsCounterMetric.WithLabelValues(severity).Inc()
+}
+
+// Event emits the normal type event.
+func (r *upstreamRecorder) Event(reason, message string) {
+ r.shutdownMutex.RLock()
+ defer r.shutdownMutex.RUnlock()
+ defer r.incrementEventsCounter(corev1.EventTypeNormal)
+ if r.shuttingDown {
+ r.fallbackRecorder.Event(reason, message)
+ return
+ }
+ r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message)
+}
+
+// Warning emits the warning type event.
+func (r *upstreamRecorder) Warning(reason, message string) {
+ r.shutdownMutex.RLock()
+ defer r.shutdownMutex.RUnlock()
+ defer r.incrementEventsCounter(corev1.EventTypeWarning)
+ if r.shuttingDown {
+ r.fallbackRecorder.Warning(reason, message)
+ return
+ }
+ r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
new file mode 100644
index 0000000000..294770f3e0
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
@@ -0,0 +1,77 @@
+package management
+
+import (
+ v1 "github.com/openshift/api/operator/v1"
+)
+
+var (
+ allowOperatorUnmanagedState = true
+ allowOperatorRemovedState = true
+)
+
+// SetOperatorAlwaysManaged is a one-time choice made when an operator wants to opt out from supporting the "unmanaged" state.
+// This is the case for control plane operators or operators that are required to always run, as otherwise the cluster will
+// get into an unstable state or critical components will stop working.
+func SetOperatorAlwaysManaged() {
+ allowOperatorUnmanagedState = false
+}
+
+// SetOperatorUnmanageable is a one-time choice made when an operator wants to support the "unmanaged" state.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorUnmanageable() {
+ allowOperatorUnmanagedState = true
+}
+
+// SetOperatorNotRemovable is a one-time choice the operator author can make to indicate the operator does not support
+// removal of its operand. This makes sense for operators like kube-apiserver where removing the operand will lead to a
+// bricked, non-automatically recoverable state.
+func SetOperatorNotRemovable() {
+ allowOperatorRemovedState = false
+}
+
+// SetOperatorRemovable is a one-time choice the operator author can make to indicate the operator supports
+// removal of its operand.
+// This is the default setting, provided here mostly for unit tests.
+func SetOperatorRemovable() {
+ allowOperatorRemovedState = true
+}
+
+// IsOperatorAlwaysManaged means the operator can't be set to unmanaged state.
+// It reports the inverse of the package-level allowOperatorUnmanagedState flag.
+func IsOperatorAlwaysManaged() bool {
+ return !allowOperatorUnmanagedState
+}
+
+// IsOperatorNotRemovable means the operator can't be set to removed state.
+// It reports the inverse of the package-level allowOperatorRemovedState flag.
+func IsOperatorNotRemovable() bool {
+ return !allowOperatorRemovedState
+}
+
+// IsOperatorRemovable means the operator can be set to removed state.
+// It reports the package-level allowOperatorRemovedState flag directly.
+func IsOperatorRemovable() bool {
+ return allowOperatorRemovedState
+}
+
+// IsOperatorUnknownState returns true when the given management state is not
+// one of the recognized Managed, Removed, or Unmanaged values.
+func IsOperatorUnknownState(state v1.ManagementState) bool {
+ switch state {
+ case v1.Managed, v1.Removed, v1.Unmanaged:
+ return false
+ default:
+ return true
+ }
+}
+
+// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand.
+func IsOperatorManaged(state v1.ManagementState) bool {
+ // Operators that opted out of the unmanaged or removed states are treated as
+ // managed regardless of the state recorded on the resource.
+ if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
+ return true
+ }
+ switch state {
+ case v1.Managed:
+ return true
+ case v1.Removed:
+ return false
+ case v1.Unmanaged:
+ return false
+ }
+ // Unknown states fall through to "managed" so the control loop keeps running.
+ return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go
new file mode 100644
index 0000000000..fafa39c404
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go
@@ -0,0 +1,166 @@
+package resourceapply
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
+ "k8s.io/klog/v2"
+)
+
+// ApplyMutatingWebhookConfigurationImproved ensures the form of the specified
+// mutatingwebhookconfiguration is present in the API. If it does not exist,
+// it will be created. If it does exist, the metadata of the required
+// mutatingwebhookconfiguration will be merged with the existing mutatingwebhookconfiguration
+// and an update performed if the mutatingwebhookconfiguration spec and metadata differ from
+// the previously required spec and metadata based on generation change.
+func ApplyMutatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.MutatingWebhookConfigurationsGetter, recorder events.Recorder,
+ requiredOriginal *admissionregistrationv1.MutatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.MutatingWebhookConfiguration, bool, error) {
+
+ if requiredOriginal == nil {
+ return nil, false, fmt.Errorf("Unexpected nil instead of an object")
+ }
+
+ existing, err := client.MutatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ // Create path: strip apply-bookkeeping labels/annotations before writing.
+ required := requiredOriginal.DeepCopy()
+ actual, err := client.MutatingWebhookConfigurations().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.MutatingWebhookConfiguration), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ if err != nil {
+ return nil, false, err
+ }
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, actual)
+ return actual, true, nil
+ } else if err != nil {
+ return nil, false, err
+ }
+
+ // Skip the API round-trip entirely when the cache says nothing changed.
+ if cache.SafeToSkipApply(requiredOriginal, existing) {
+ return existing, false, nil
+ }
+
+ required := requiredOriginal.DeepCopy()
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ // Carry over CA bundles already present on the live object when the required
+ // object leaves them unset (service-ca operator compatibility; see helper).
+ copyMutatingWebhookCABundle(existing, required)
+ webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks)
+ if webhooksEquivalent && !*modified {
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy)
+ return existingCopy, false, nil
+ }
+ // at this point we know that we're going to perform a write. We're just trying to get the object correct
+ toWrite := existingCopy // shallow copy so the code reads easier
+ toWrite.Webhooks = required.Webhooks
+
+ klog.V(4).Infof("MutatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite))
+
+ actual, err := client.MutatingWebhookConfigurations().Update(ctx, toWrite, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ if err != nil {
+ return nil, false, err
+ }
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, actual)
+ return actual, true, nil
+}
+
+// copyMutatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from the existing resource if they were set before
+// and are not set in the required object. This provides upgrade compatibility with the service-ca-bundle operator.
+func copyMutatingWebhookCABundle(from, to *admissionregistrationv1.MutatingWebhookConfiguration) {
+ // Index the existing webhooks by name for constant-time lookup below.
+ fromMap := make(map[string]admissionregistrationv1.MutatingWebhook, len(from.Webhooks))
+ for _, webhook := range from.Webhooks {
+ fromMap[webhook.Name] = webhook
+ }
+
+ for i, wh := range to.Webhooks {
+ if existing, ok := fromMap[wh.Name]; ok && wh.ClientConfig.CABundle == nil {
+ to.Webhooks[i].ClientConfig.CABundle = existing.ClientConfig.CABundle
+ }
+ }
+}
+
+// ApplyValidatingWebhookConfigurationImproved ensures the form of the specified
+// validatingwebhookconfiguration is present in the API. If it does not exist,
+// it will be created. If it does exist, the metadata of the required
+// validatingwebhookconfiguration will be merged with the existing validatingwebhookconfiguration
+// and an update performed if the validatingwebhookconfiguration spec and metadata differ from
+// the previously required spec and metadata based on generation change.
+func ApplyValidatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.ValidatingWebhookConfigurationsGetter, recorder events.Recorder,
+ requiredOriginal *admissionregistrationv1.ValidatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.ValidatingWebhookConfiguration, bool, error) {
+ if requiredOriginal == nil {
+ return nil, false, fmt.Errorf("Unexpected nil instead of an object")
+ }
+
+ existing, err := client.ValidatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ // Create path: strip apply-bookkeeping labels/annotations before writing.
+ required := requiredOriginal.DeepCopy()
+ actual, err := client.ValidatingWebhookConfigurations().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.ValidatingWebhookConfiguration), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ if err != nil {
+ return nil, false, err
+ }
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, actual)
+ return actual, true, nil
+ } else if err != nil {
+ return nil, false, err
+ }
+
+ // Skip the API round-trip entirely when the cache says nothing changed.
+ if cache.SafeToSkipApply(requiredOriginal, existing) {
+ return existing, false, nil
+ }
+
+ required := requiredOriginal.DeepCopy()
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ // Carry over CA bundles already present on the live object when the required
+ // object leaves them unset (service-ca operator compatibility; see helper).
+ copyValidatingWebhookCABundle(existing, required)
+ webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks)
+ if webhooksEquivalent && !*modified {
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy)
+ return existingCopy, false, nil
+ }
+ // at this point we know that we're going to perform a write. We're just trying to get the object correct
+ toWrite := existingCopy // shallow copy so the code reads easier
+ toWrite.Webhooks = required.Webhooks
+
+ klog.V(4).Infof("ValidatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite))
+
+ actual, err := client.ValidatingWebhookConfigurations().Update(ctx, toWrite, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ if err != nil {
+ return nil, false, err
+ }
+ // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy
+ cache.UpdateCachedResourceMetadata(requiredOriginal, actual)
+ return actual, true, nil
+}
+
+// copyValidatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from the existing resource if they were set before
+// and are not set in the required object. This provides upgrade compatibility with the service-ca-bundle operator.
+func copyValidatingWebhookCABundle(from, to *admissionregistrationv1.ValidatingWebhookConfiguration) {
+ // Index the existing webhooks by name for constant-time lookup below.
+ fromMap := make(map[string]admissionregistrationv1.ValidatingWebhook, len(from.Webhooks))
+ for _, webhook := range from.Webhooks {
+ fromMap[webhook.Name] = webhook
+ }
+
+ for i, wh := range to.Webhooks {
+ if existing, ok := fromMap[wh.Name]; ok && wh.ClientConfig.CABundle == nil {
+ to.Webhooks[i].ClientConfig.CABundle = existing.ClientConfig.CABundle
+ }
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go
new file mode 100644
index 0000000000..6cd94f64dc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go
@@ -0,0 +1,56 @@
+package resourceapply
+
+import (
+ "context"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
+)
+
+// ApplyCustomResourceDefinitionV1 applies the required CustomResourceDefinition to the cluster.
+// It creates the CRD when absent, otherwise merges the required state into a copy of the
+// existing object and updates only when the merge reports a modification.
+func ApplyCustomResourceDefinitionV1(ctx context.Context, client apiextclientv1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, bool, error) {
+ existing, err := client.CustomResourceDefinitions().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.CustomResourceDefinitions().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*apiextensionsv1.CustomResourceDefinition), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ // Merge required into a copy of existing; modified tells us whether anything changed.
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ resourcemerge.EnsureCustomResourceDefinitionV1(modified, existingCopy, *required)
+ if !*modified {
+ return existing, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.CustomResourceDefinitions().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+
+ return actual, true, err
+}
+
+// DeleteCustomResourceDefinitionV1 deletes the named CustomResourceDefinition.
+// A missing object is treated as success (nil, false, nil) rather than an error;
+// the bool result reports whether a deletion actually happened.
+func DeleteCustomResourceDefinitionV1(ctx context.Context, client apiextclientv1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, bool, error) {
+ err := client.CustomResourceDefinitions().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ // err is necessarily nil here, so this records a successful deletion event.
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go
new file mode 100644
index 0000000000..b09bf46f22
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go
@@ -0,0 +1,51 @@
+package resourceapply
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
+ apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// ApplyAPIService merges objectmeta and requires apiservice coordinates. It does not touch CA bundles, which should be managed via service CA controller.
+func ApplyAPIService(ctx context.Context, client apiregistrationv1client.APIServicesGetter, recorder events.Recorder, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) {
+ existing, err := client.APIServices().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.APIServices().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*apiregistrationv1.APIService), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ // Compare only the spec fields this applier owns: service reference,
+ // priorities, and the insecure-TLS flag. CA bundle is deliberately ignored.
+ serviceSame := equality.Semantic.DeepEqual(existingCopy.Spec.Service, required.Spec.Service)
+ prioritySame := existingCopy.Spec.VersionPriority == required.Spec.VersionPriority && existingCopy.Spec.GroupPriorityMinimum == required.Spec.GroupPriorityMinimum
+ insecureSame := existingCopy.Spec.InsecureSkipTLSVerify == required.Spec.InsecureSkipTLSVerify
+ // there was no change to metadata, the service and priorities were right
+ if !*modified && serviceSame && prioritySame && insecureSame {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Spec = required.Spec
+
+ if klog.V(4).Enabled() {
+ klog.Infof("APIService %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy))
+ }
+ actual, err := client.APIServices().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go
new file mode 100644
index 0000000000..b2a645e5d5
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go
@@ -0,0 +1,246 @@
+package resourceapply
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+
+ "k8s.io/klog/v2"
+
+ appsv1 "k8s.io/api/apps/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/uuid"
+ appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// The Apply methods in this file ensure that a resource is created or updated to match
+// the form provided by the caller.
+//
+// If the resource does not yet exist, it will be created.
+//
+// If the resource exists, the metadata of the required resource will be merged with the
+// existing resource and an update will be performed if the spec and metadata differ between
+// the required and existing resources. To be reliable, the input of the required spec from
+// the operator should be stable. It does not need to set all fields, since some fields are
+// defaulted server-side. Detection of spec drift from intent by other actors is determined
+// by generation, not by spec comparison.
+//
+// To ensure an update in response to state external to the resource spec, the caller should
+// set an annotation representing that external state e.g.
+//
+// `myoperator.openshift.io/config-resource-version: `
+//
+// An update will be performed if:
+//
+// - The required resource metadata differs from that of the existing resource.
+// - The difference will be detected by comparing the name, namespace, labels and
+// annotations of the 2 resources.
+//
+// - The generation expected by the operator differs from generation of the existing
+// resource.
+// - This is the likely result of an actor other than the operator updating a resource
+// managed by the operator.
+//
+// - The spec of the required resource differs from the spec of the existing resource.
+// - The difference will be detected via metadata comparison since the hash of the
+// resource's spec will be set as an annotation prior to comparison.
+
+const specHashAnnotation = "operator.openshift.io/spec-hash"
+
+// SetSpecHashAnnotation computes the hash of the provided spec and sets an annotation of the
+// hash on the provided ObjectMeta. This method is used internally by Apply methods, and
+// is exposed to support testing with fake clients that need to know the mutated form of the
+// resource resulting from an Apply call.
+// The hash is the hex-encoded SHA-256 of the spec's JSON encoding; JSON marshal
+// errors are returned to the caller unmodified.
+func SetSpecHashAnnotation(objMeta *metav1.ObjectMeta, spec interface{}) error {
+ jsonBytes, err := json.Marshal(spec)
+ if err != nil {
+ return err
+ }
+ specHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes))
+ if objMeta.Annotations == nil {
+ objMeta.Annotations = map[string]string{}
+ }
+ objMeta.Annotations[specHashAnnotation] = specHash
+ return nil
+}
+
+// ApplyDeployment ensures the form of the specified deployment is present in the API. If it
+// does not exist, it will be created. If it does exist, the metadata of the required
+// deployment will be merged with the existing deployment and an update performed if the
+// deployment spec and metadata differ from the previously required spec and metadata. For
+// further detail, check the top-level comment.
+//
+// NOTE: The previous implementation of this method was renamed to
+// ApplyDeploymentWithForce. If you are reading this in response to a compile error due to the
+// change in signature, you have the following options:
+//
+// - Update the calling code to rely on the spec comparison provided by the new
+// implementation. If the code in question was specifying the force parameter to ensure
+// rollout in response to changes in resources external to the deployment, it will need to be
+// revised to set that external state as an annotation e.g.
+//
+// myoperator.openshift.io/my-resource:
+//
+// - Update the call to use ApplyDeploymentWithForce. This is available as a temporary measure
+// but the method is deprecated and will be removed in 4.6.
+func ApplyDeployment(ctx context.Context, client appsclientv1.DeploymentsGetter, recorder events.Recorder,
+ requiredOriginal *appsv1.Deployment, expectedGeneration int64) (*appsv1.Deployment, bool, error) {
+
+ required := requiredOriginal.DeepCopy()
+ // Stamp the spec hash annotation so spec drift is detectable via metadata comparison.
+ err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec)
+ if err != nil {
+ return nil, false, err
+ }
+
+ return ApplyDeploymentWithForce(ctx, client, recorder, required, expectedGeneration, false)
+}
+
+// ApplyDeploymentWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+//
+// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDeployment before then.
+func ApplyDeploymentWithForce(ctx context.Context, client appsclientv1.DeploymentsGetter, recorder events.Recorder, requiredOriginal *appsv1.Deployment, expectedGeneration int64,
+ forceRollout bool) (*appsv1.Deployment, bool, error) {
+
+ required := requiredOriginal.DeepCopy()
+ if required.Annotations == nil {
+ required.Annotations = map[string]string{}
+ }
+ if _, ok := required.Annotations[specHashAnnotation]; !ok {
+ // If the spec hash annotation is not present, the caller expects the
+ // pull-spec annotation to be applied.
+ // NOTE(review): indexing Containers[0] assumes the pod template declares at
+ // least one container; an empty template would panic here — confirm upstream.
+ required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+ }
+ existing, err := client.Deployments(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ actual, err := client.Deployments(required.Namespace).Create(ctx, required, metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ // there was no change to metadata, the generation was right, and we weren't asked for force the deployment
+ if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+ return existingCopy, false, nil
+ }
+
+ // at this point we know that we're going to perform a write. We're just trying to get the object correct
+ toWrite := existingCopy // shallow copy so the code reads easier
+ toWrite.Spec = *required.Spec.DeepCopy()
+ if forceRollout {
+ // forces a deployment rollout by stamping a fresh UUID on both the object
+ // and the pod template (the template change is what triggers new pods)
+ forceString := string(uuid.NewUUID())
+ if toWrite.Annotations == nil {
+ toWrite.Annotations = map[string]string{}
+ }
+ if toWrite.Spec.Template.Annotations == nil {
+ toWrite.Spec.Template.Annotations = map[string]string{}
+ }
+ toWrite.Annotations["operator.openshift.io/force"] = forceString
+ toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("Deployment %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite))
+ }
+
+ actual, err := client.Deployments(required.Namespace).Update(ctx, toWrite, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+// ApplyDaemonSet ensures the form of the specified daemonset is present in the API. If it
+// does not exist, it will be created. If it does exist, the metadata of the required
+// daemonset will be merged with the existing daemonset and an update performed if the
+// daemonset spec and metadata differ from the previously required spec and metadata. For
+// further detail, check the top-level comment.
+//
+// NOTE: The previous implementation of this method was renamed to ApplyDaemonSetWithForce. If
+// you are reading this in response to a compile error due to the change in signature, you have
+// the following options:
+//
+// - Update the calling code to rely on the spec comparison provided by the new
+// implementation. If the code in question was specifying the force parameter to ensure
+// rollout in response to changes in resources external to the daemonset, it will need to be
+// revised to set that external state as an annotation e.g.
+//
+// myoperator.openshift.io/my-resource:
+//
+// - Update the call to use ApplyDaemonSetWithForce. This is available as a temporary measure
+// but the method is deprecated and will be removed in 4.6.
+func ApplyDaemonSet(ctx context.Context, client appsclientv1.DaemonSetsGetter, recorder events.Recorder,
+ requiredOriginal *appsv1.DaemonSet, expectedGeneration int64) (*appsv1.DaemonSet, bool, error) {
+
+ required := requiredOriginal.DeepCopy()
+ // Stamp the spec hash annotation so spec drift is detectable via metadata comparison.
+ err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec)
+ if err != nil {
+ return nil, false, err
+ }
+
+ return ApplyDaemonSetWithForce(ctx, client, recorder, required, expectedGeneration, false)
+}
+
+// ApplyDaemonSetWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDaemonSet before then.
+func ApplyDaemonSetWithForce(ctx context.Context, client appsclientv1.DaemonSetsGetter, recorder events.Recorder, requiredOriginal *appsv1.DaemonSet, expectedGeneration int64, forceRollout bool) (*appsv1.DaemonSet, bool, error) {
+ required := requiredOriginal.DeepCopy()
+ if required.Annotations == nil {
+ required.Annotations = map[string]string{}
+ }
+ if _, ok := required.Annotations[specHashAnnotation]; !ok {
+ // If the spec hash annotation is not present, the caller expects the
+ // pull-spec annotation to be applied.
+ // NOTE(review): indexing Containers[0] assumes the pod template declares at
+ // least one container; an empty template would panic here — confirm upstream.
+ required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+ }
+ existing, err := client.DaemonSets(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ actual, err := client.DaemonSets(required.Namespace).Create(ctx, required, metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ // there was no change to metadata, the generation was right, and we weren't asked for force the deployment
+ if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+ return existingCopy, false, nil
+ }
+
+ // at this point we know that we're going to perform a write. We're just trying to get the object correct
+ toWrite := existingCopy // shallow copy so the code reads easier
+ toWrite.Spec = *required.Spec.DeepCopy()
+ if forceRollout {
+ // forces a rollout by stamping a fresh UUID on both the object and the pod
+ // template (the template change is what triggers new pods)
+ forceString := string(uuid.NewUUID())
+ if toWrite.Annotations == nil {
+ toWrite.Annotations = map[string]string{}
+ }
+ if toWrite.Spec.Template.Annotations == nil {
+ toWrite.Spec.Template.Annotations = map[string]string{}
+ }
+ toWrite.Annotations["operator.openshift.io/force"] = forceString
+ toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("DaemonSet %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite))
+ }
+ actual, err := client.DaemonSets(required.Namespace).Update(ctx, toWrite, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
new file mode 100644
index 0000000000..c519d4dc57
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go
@@ -0,0 +1,657 @@
+package resourceapply
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/sets"
+ coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/klog/v2"
+)
+
+// getCoreGroupKind maps a typed core-group object to its GroupKind (Group is empty for core/v1); returns nil for unsupported types.
+//
+// TODO find a way to create a registry of these based on struct mapping or some such that forces users to get this right
+// for creating an ApplyGeneric; perhaps a struct containing the apply function and the getKind.
+func getCoreGroupKind(obj runtime.Object) *schema.GroupKind {
+ switch obj.(type) {
+ case *corev1.Namespace:
+ return &schema.GroupKind{
+ Kind: "Namespace",
+ }
+ case *corev1.Service:
+ return &schema.GroupKind{
+ Kind: "Service",
+ }
+ case *corev1.Pod:
+ return &schema.GroupKind{
+ Kind: "Pod",
+ }
+ case *corev1.ServiceAccount:
+ return &schema.GroupKind{
+ Kind: "ServiceAccount",
+ }
+ case *corev1.ConfigMap:
+ return &schema.GroupKind{
+ Kind: "ConfigMap",
+ }
+ case *corev1.Secret:
+ return &schema.GroupKind{
+ Kind: "Secret",
+ }
+ default:
+ return nil
+ }
+}
+
+// ApplyNamespace creates the Namespace if missing, otherwise merges only object metadata; non-caching apply path.
+func ApplyNamespace(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) {
+ return ApplyNamespaceImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplyService creates or updates the Service via the non-caching apply path; only metadata, selector, and type drive updates.
+// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update
+// TODO I've special cased the selector for now
+func ApplyService(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) {
+ return ApplyServiceImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplyPod creates the Pod if missing, otherwise merges only object metadata; non-caching apply path.
+func ApplyPod(ctx context.Context, client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) {
+ return ApplyPodImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplyServiceAccount creates the ServiceAccount if missing, otherwise merges only object metadata; non-caching apply path.
+func ApplyServiceAccount(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) {
+ return ApplyServiceAccountImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplyConfigMap creates or updates the ConfigMap, merging metadata and forcing data; non-caching apply path.
+func ApplyConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) {
+ return ApplyConfigMapImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplySecret creates or updates the Secret, merging metadata and forcing data; non-caching apply path.
+func ApplySecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) {
+ return ApplySecretImproved(ctx, client, recorder, required, noCache)
+}
+
+// ApplyNamespaceImproved merges objectmeta only; the ResourceCache lets unchanged applies skip the API write.
+func ApplyNamespaceImproved(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace, cache ResourceCache) (*corev1.Namespace, bool, error) {
+ existing, err := client.Namespaces().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.Namespaces().
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Namespace), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(required, existing) {
+ return existing, false, nil
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ if !*modified {
+ cache.UpdateCachedResourceMetadata(required, existingCopy)
+ return existingCopy, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("Namespace %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.Namespaces().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+}
+
+// ApplyServiceImproved merges objectmeta and requires.
+// It detects changes in `required` via a spec-hash annotation, i.e. an operator needs .spec changes and overwrites existing .spec with those.
+// TODO, since this cannot determine whether changes in `existing` are due to legitimate actors (api server) or illegitimate ones (users), we cannot update.
+// TODO I've special cased the selector for now
+func ApplyServiceImproved(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, requiredOriginal *corev1.Service, cache ResourceCache) (*corev1.Service, bool, error) {
+ required := requiredOriginal.DeepCopy()
+ err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec)
+ if err != nil {
+ return nil, false, err
+ }
+
+ existing, err := client.Services(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.Services(requiredCopy.Namespace).
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Service), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(required, existing) {
+ return existing, false, nil
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ // This will catch also changes between old `required.spec` and current `required.spec`, because
+ // the annotation from SetSpecHashAnnotation will be different.
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ selectorSame := equality.Semantic.DeepEqual(existingCopy.Spec.Selector, required.Spec.Selector)
+
+ typeSame := false
+ requiredIsEmpty := len(required.Spec.Type) == 0
+ existingCopyIsCluster := existingCopy.Spec.Type == corev1.ServiceTypeClusterIP
+ if (requiredIsEmpty && existingCopyIsCluster) || equality.Semantic.DeepEqual(existingCopy.Spec.Type, required.Spec.Type) {
+ typeSame = true
+ }
+
+ if selectorSame && typeSame && !*modified {
+ cache.UpdateCachedResourceMetadata(required, existingCopy)
+ return existingCopy, false, nil
+ }
+
+ // Either (user changed selector or type) or metadata changed (incl. spec hash). Stomp over
+ // any user *and* Kubernetes changes, hoping that Kubernetes will restore its values.
+ existingCopy.Spec = required.Spec
+ if klog.V(4).Enabled() {
+ klog.Infof("Service %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+ }
+
+ actual, err := client.Services(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+}
+
+// ApplyPodImproved merges objectmeta, does not worry about anything else.
+func ApplyPodImproved(ctx context.Context, client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod, cache ResourceCache) (*corev1.Pod, bool, error) {
+ existing, err := client.Pods(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.Pods(requiredCopy.Namespace).
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Pod), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(required, existing) {
+ return existing, false, nil
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ if !*modified {
+ cache.UpdateCachedResourceMetadata(required, existingCopy)
+ return existingCopy, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("Pod %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+ }
+
+ actual, err := client.Pods(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+}
+
+// ApplyServiceAccountImproved merges objectmeta, does not worry about anything else.
+func ApplyServiceAccountImproved(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount, cache ResourceCache) (*corev1.ServiceAccount, bool, error) {
+ existing, err := client.ServiceAccounts(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.ServiceAccounts(requiredCopy.Namespace).
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.ServiceAccount), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(required, existing) {
+ return existing, false, nil
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ if !*modified {
+ cache.UpdateCachedResourceMetadata(required, existingCopy)
+ return existingCopy, false, nil
+ }
+ if klog.V(4).Enabled() {
+ klog.Infof("ServiceAccount %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+ }
+ actual, err := client.ServiceAccounts(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+}
+
+// ApplyConfigMapImproved merges objectmeta and forces data/binaryData, special-casing an injected ca-bundle.crt key.
+func ApplyConfigMapImproved(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap, cache ResourceCache) (*corev1.ConfigMap, bool, error) {
+ existing, err := client.ConfigMaps(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.ConfigMaps(requiredCopy.Namespace).
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.ConfigMap), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(required, existing) {
+ return existing, false, nil
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+ caBundleInjected := required.Labels["config.openshift.io/inject-trusted-cabundle"] == "true"
+ _, newCABundleRequired := required.Data["ca-bundle.crt"]
+
+ var modifiedKeys []string
+ for existingCopyKey, existingCopyValue := range existingCopy.Data {
+ // if we're injecting a ca-bundle and the required isn't forcing the value, then don't use the value of existing
+ // to drive a diff detection. If required has set the value then we need to force the value in order to have apply
+ // behave predictably.
+ if caBundleInjected && !newCABundleRequired && existingCopyKey == "ca-bundle.crt" {
+ continue
+ }
+ if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+ modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+ }
+ }
+ for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData {
+ if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) {
+ modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey)
+ }
+ }
+ for requiredKey := range required.Data {
+ if _, ok := existingCopy.Data[requiredKey]; !ok {
+ modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+ }
+ }
+ for requiredBinKey := range required.BinaryData {
+ if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok {
+ modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey)
+ }
+ }
+
+ dataSame := len(modifiedKeys) == 0
+ if dataSame && !*modified {
+ cache.UpdateCachedResourceMetadata(required, existingCopy)
+ return existingCopy, false, nil
+ }
+ existingCopy.Data = required.Data
+ existingCopy.BinaryData = required.BinaryData
+ // if we're injecting a cabundle, and we had a previous value, and the required object isn't setting the value, then set back to the previous
+ if existingCABundle, existedBefore := existing.Data["ca-bundle.crt"]; caBundleInjected && existedBefore && !newCABundleRequired {
+ if existingCopy.Data == nil {
+ existingCopy.Data = map[string]string{}
+ }
+ existingCopy.Data["ca-bundle.crt"] = existingCABundle
+ }
+
+ actual, err := client.ConfigMaps(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+
+ var details string
+ if !dataSame {
+ sort.Sort(sort.StringSlice(modifiedKeys))
+ details = fmt.Sprintf("cause by changes in %v", strings.Join(modifiedKeys, ","))
+ }
+ if klog.V(4).Enabled() {
+ klog.Infof("ConfigMap %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+ }
+ reportUpdateEvent(recorder, required, err, details)
+ cache.UpdateCachedResourceMetadata(required, actual)
+ return actual, true, err
+}
+
+// ApplySecretImproved merges objectmeta and forces data; stringData is folded into data first, and immutable type changes fall back to delete+create.
+func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, requiredInput *corev1.Secret, cache ResourceCache) (*corev1.Secret, bool, error) {
+ // copy the stringData to data. Error on a data content conflict inside required. This is usually a bug.
+
+ existing, err := client.Secrets(requiredInput.Namespace).Get(ctx, requiredInput.Name, metav1.GetOptions{})
+ if err != nil && !apierrors.IsNotFound(err) {
+ return nil, false, err
+ }
+
+ if cache.SafeToSkipApply(requiredInput, existing) {
+ return existing, false, nil
+ }
+
+ required := requiredInput.DeepCopy()
+ if required.Data == nil {
+ required.Data = map[string][]byte{}
+ }
+ for k, v := range required.StringData {
+ if dataV, ok := required.Data[k]; ok {
+ if string(dataV) != v {
+ return nil, false, fmt.Errorf("Secret.stringData[%q] conflicts with Secret.data[%q]", k, k)
+ }
+ }
+ required.Data[k] = []byte(v)
+ }
+ required.StringData = nil
+
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.Secrets(requiredCopy.Namespace).
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Secret), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ cache.UpdateCachedResourceMetadata(requiredInput, actual)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(resourcemerge.BoolPtr(false), &existingCopy.ObjectMeta, required.ObjectMeta)
+
+ switch required.Type {
+ case corev1.SecretTypeServiceAccountToken:
+ // Secrets for ServiceAccountTokens will have data injected by kube controller manager.
+ // We will apply only the explicitly set keys.
+ if existingCopy.Data == nil {
+ existingCopy.Data = map[string][]byte{}
+ }
+
+ for k, v := range required.Data {
+ existingCopy.Data[k] = v
+ }
+
+ default:
+ existingCopy.Data = required.Data
+ }
+
+ existingCopy.Type = required.Type
+
+ // Server defaults some values and we need to do it as well or it will never equal.
+ if existingCopy.Type == "" {
+ existingCopy.Type = corev1.SecretTypeOpaque
+ }
+
+ if equality.Semantic.DeepEqual(existingCopy, existing) {
+ cache.UpdateCachedResourceMetadata(requiredInput, existingCopy)
+ return existing, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecretNoError(existing, existingCopy))
+ }
+
+ var actual *corev1.Secret
+ /*
+ * Kubernetes validation silently hides failures to update secret type.
+ * https://github.com/kubernetes/kubernetes/blob/98e65951dccfd40d3b4f31949c2ab8df5912d93e/pkg/apis/core/validation/validation.go#L5048
+ * We need to explicitly opt for delete+create in that case.
+ */
+ if existingCopy.Type == existing.Type {
+ actual, err = client.Secrets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, existingCopy, err)
+
+ if err == nil {
+ return actual, true, err
+ }
+ if !strings.Contains(err.Error(), "field is immutable") {
+ return actual, true, err
+ }
+ }
+
+ // if the field was immutable on a secret, we're going to be stuck until we delete it. Try to delete and then create
+ deleteErr := client.Secrets(required.Namespace).Delete(ctx, existingCopy.Name, metav1.DeleteOptions{})
+ reportDeleteEvent(recorder, existingCopy, deleteErr)
+
+ // clear the RV and track the original actual and error for the return like our create value.
+ existingCopy.ResourceVersion = ""
+ actual, err = client.Secrets(required.Namespace).Create(ctx, existingCopy, metav1.CreateOptions{})
+ reportCreateEvent(recorder, existingCopy, err)
+ cache.UpdateCachedResourceMetadata(requiredInput, actual)
+ return actual, true, err
+}
+
+// SyncConfigMap applies a ConfigMap from a location `sourceNamespace/sourceName` to `targetNamespace/targetName`; the target is deleted when the source disappears.
+func SyncConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+ return SyncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs)
+}
+
+// SyncPartialConfigMap does what SyncConfigMap does but synchronizes only the subset of keys given by `syncedKeys`.
+// SyncPartialConfigMap will delete the target if `syncedKeys` are set but the source does not contain any of these keys.
+func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.String, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+ source, err := client.ConfigMaps(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{})
+ switch {
+ case apierrors.IsNotFound(err):
+ modified, err := deleteConfigMapSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+ return nil, modified, err
+ case err != nil:
+ return nil, false, err
+ default:
+ if len(syncedKeys) > 0 {
+ for sourceKey := range source.Data {
+ if !syncedKeys.Has(sourceKey) {
+ delete(source.Data, sourceKey)
+ }
+ }
+ for sourceKey := range source.BinaryData {
+ if !syncedKeys.Has(sourceKey) {
+ delete(source.BinaryData, sourceKey)
+ }
+ }
+
+ // remove the synced CM if the requested fields are not present in source
+ if len(source.Data)+len(source.BinaryData) == 0 {
+ modified, err := deleteConfigMapSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+ return nil, modified, err
+ }
+ }
+
+ source.Namespace = targetNamespace
+ source.Name = targetName
+ source.ResourceVersion = ""
+ source.OwnerReferences = ownerRefs
+ return ApplyConfigMap(ctx, client, recorder, source)
+ }
+}
+
+func deleteConfigMapSyncTarget(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, targetNamespace, targetName string) (bool, error) {
+ // The goal of this additional GET is to avoid reaching the API with a DELETE request
+ // in case the target doesn't exist. This is useful when using a cached client.
+ _, err := client.ConfigMaps(targetNamespace).Get(ctx, targetName, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ err = client.ConfigMaps(targetNamespace).Delete(ctx, targetName, metav1.DeleteOptions{})
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ if err == nil {
+ recorder.Eventf("TargetConfigDeleted", "Deleted target configmap %s/%s because source config does not exist", targetNamespace, targetName)
+ return true, nil
+ }
+ return false, err
+}
+
+// SyncSecret applies a Secret from a location `sourceNamespace/sourceName` to `targetNamespace/targetName`; the target is deleted when the source disappears.
+func SyncSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) {
+ return SyncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs)
+}
+
+// SyncPartialSecret does what SyncSecret does but synchronizes only the subset of keys given by `syncedKeys`.
+// SyncPartialSecret will delete the target if `syncedKeys` are set but the source does not contain any of these keys.
+func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.String, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) {
+ source, err := client.Secrets(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{})
+ switch {
+ case apierrors.IsNotFound(err):
+ modified, err := deleteSecretSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+ return nil, modified, err
+ case err != nil:
+ return nil, false, err
+ default:
+ if source.Type == corev1.SecretTypeServiceAccountToken {
+
+ // Make sure the token is already present, otherwise we have to wait before creating the target
+ if len(source.Data[corev1.ServiceAccountTokenKey]) == 0 {
+ return nil, false, fmt.Errorf("secret %s/%s doesn't have a token yet", source.Namespace, source.Name)
+ }
+
+ if source.Annotations != nil {
+ // When syncing a service account token we have to remove the SA annotation to disable injection into copies
+ delete(source.Annotations, corev1.ServiceAccountNameKey)
+ // To make it clean, remove the dormant annotations as well
+ delete(source.Annotations, corev1.ServiceAccountUIDKey)
+ }
+
+ // SecretTypeServiceAccountToken implies required fields and injection which we do not want in copies
+ source.Type = corev1.SecretTypeOpaque
+ }
+
+ if len(syncedKeys) > 0 {
+ for sourceKey := range source.Data {
+ if !syncedKeys.Has(sourceKey) {
+ delete(source.Data, sourceKey)
+ }
+ }
+ for sourceKey := range source.StringData {
+ if !syncedKeys.Has(sourceKey) {
+ delete(source.StringData, sourceKey)
+ }
+ }
+
+ // remove the synced secret if the requested fields are not present in source
+ if len(source.Data)+len(source.StringData) == 0 {
+ modified, err := deleteSecretSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+ return nil, modified, err
+ }
+ }
+
+ source.Namespace = targetNamespace
+ source.Name = targetName
+ source.ResourceVersion = ""
+ source.OwnerReferences = ownerRefs
+ return ApplySecret(ctx, client, recorder, source)
+ }
+}
+// deleteSecretSyncTarget removes the sync-target Secret, treating NotFound as a no-op, and emits an event on success.
+func deleteSecretSyncTarget(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, targetNamespace, targetName string) (bool, error) {
+ err := client.Secrets(targetNamespace).Delete(ctx, targetName, metav1.DeleteOptions{})
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ if err == nil {
+ recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName)
+ return true, nil
+ }
+ return false, err
+}
+// DeleteNamespace deletes the named Namespace, treating NotFound as a no-op, and reports a delete event on success.
+func DeleteNamespace(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) {
+ err := client.Namespaces().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+// DeleteService deletes the named Service, treating NotFound as a no-op, and reports a delete event on success.
+func DeleteService(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) {
+ err := client.Services(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+// DeletePod deletes the named Pod, treating NotFound as a no-op, and reports a delete event on success.
+func DeletePod(ctx context.Context, client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) {
+ err := client.Pods(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+// DeleteServiceAccount deletes the named ServiceAccount, treating NotFound as a no-op, and reports a delete event on success.
+func DeleteServiceAccount(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) {
+ err := client.ServiceAccounts(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+// DeleteConfigMap deletes the named ConfigMap, treating NotFound as a no-op, and reports a delete event on success.
+func DeleteConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) {
+ err := client.ConfigMaps(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+// DeleteSecret deletes the named Secret, treating NotFound as a no-op, and reports a delete event on success.
+func DeleteSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) {
+ err := client.Secrets(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go
new file mode 100644
index 0000000000..2de8136a8a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go
@@ -0,0 +1,106 @@
+package resourceapply
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/client-go/dynamic"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
+)
+
+const (
+ CredentialsRequestGroup = "cloudcredential.openshift.io"
+ CredentialsRequestVersion = "v1"
+ CredentialsRequestResource = "credentialsrequests"
+)
+
+var credentialsRequestResourceGVR schema.GroupVersionResource = schema.GroupVersionResource{
+ Group: CredentialsRequestGroup,
+ Version: CredentialsRequestVersion,
+ Resource: CredentialsRequestResource,
+}
+// AddCredentialsRequestHash annotates the CredentialsRequest with a sha256 hash of its spec, used later for change detection.
+func AddCredentialsRequestHash(cr *unstructured.Unstructured) error {
+ jsonBytes, err := json.Marshal(cr.Object["spec"])
+ if err != nil {
+ return err
+ }
+ specHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes))
+ annotations := cr.GetAnnotations()
+ if annotations == nil {
+ annotations = map[string]string{}
+ }
+ annotations[specHashAnnotation] = specHash
+ cr.SetAnnotations(annotations)
+ return nil
+}
+// ApplyCredentialsRequest creates or updates the CredentialsRequest via the dynamic client, rewriting spec only when the generation or spec hash differ.
+func ApplyCredentialsRequest(
+ ctx context.Context,
+ client dynamic.Interface,
+ recorder events.Recorder,
+ required *unstructured.Unstructured,
+ expectedGeneration int64,
+) (*unstructured.Unstructured, bool, error) {
+ if required.GetName() == "" {
+ return nil, false, fmt.Errorf("invalid object: name cannot be empty")
+ }
+
+ if err := AddCredentialsRequestHash(required); err != nil {
+ return nil, false, err
+ }
+
+ crClient := client.Resource(credentialsRequestResourceGVR).Namespace(required.GetNamespace())
+ existing, err := crClient.Get(ctx, required.GetName(), metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ actual, err := crClient.Create(ctx, required, metav1.CreateOptions{})
+ if err == nil {
+ recorder.Eventf(
+ fmt.Sprintf("%sCreated", required.GetKind()),
+ "Created %s because it was missing",
+ resourcehelper.FormatResourceForCLIWithNamespace(required))
+ return actual, true, err
+ }
+ recorder.Warningf(
+ fmt.Sprintf("%sCreateFailed", required.GetKind()),
+ "Failed to create %s: %v",
+ resourcehelper.FormatResourceForCLIWithNamespace(required),
+ err)
+ return nil, false, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ // Check CredentialsRequest.Generation.
+ needApply := false
+ if existing.GetGeneration() != expectedGeneration {
+ needApply = true
+ }
+
+ // Check specHashAnnotation
+ existingAnnotations := existing.GetAnnotations()
+ if existingAnnotations == nil || existingAnnotations[specHashAnnotation] != required.GetAnnotations()[specHashAnnotation] {
+ needApply = true
+ }
+
+ if !needApply {
+ return existing, false, nil
+ }
+
+ requiredCopy := required.DeepCopy()
+ existing.Object["spec"] = requiredCopy.Object["spec"]
+ actual, err := crClient.Update(ctx, existing, metav1.UpdateOptions{})
+ if err != nil {
+ return nil, false, err
+ }
+ return actual, existing.GetResourceVersion() != actual.GetResourceVersion(), nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
new file mode 100644
index 0000000000..af598993f9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
@@ -0,0 +1,56 @@
+package resourceapply
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime"
+
+ openshiftapi "github.com/openshift/api"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcehelper"
+)
+
+var (
+ openshiftScheme = runtime.NewScheme()
+)
+
+func init() {
+ if err := openshiftapi.Install(openshiftScheme); err != nil {
+ panic(err)
+ }
+}
+
+func reportCreateEvent(recorder events.Recorder, obj runtime.Object, originalErr error) {
+ gvk := resourcehelper.GuessObjectGroupVersionKind(obj)
+ if originalErr == nil {
+ recorder.Eventf(fmt.Sprintf("%sCreated", gvk.Kind), "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(obj))
+ return
+ }
+ recorder.Warningf(fmt.Sprintf("%sCreateFailed", gvk.Kind), "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr)
+}
+
+func reportUpdateEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) {
+ gvk := resourcehelper.GuessObjectGroupVersionKind(obj)
+ switch {
+ case originalErr != nil:
+ recorder.Warningf(fmt.Sprintf("%sUpdateFailed", gvk.Kind), "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr)
+ case len(details) == 0:
+ recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s because it changed", resourcehelper.FormatResourceForCLIWithNamespace(obj))
+ default:
+ recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n"))
+ }
+}
+
+func reportDeleteEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) {
+ gvk := resourcehelper.GuessObjectGroupVersionKind(obj)
+ switch {
+ case originalErr != nil:
+ recorder.Warningf(fmt.Sprintf("%sDeleteFailed", gvk.Kind), "Failed to delete %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr)
+ case len(details) == 0:
+ recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s", resourcehelper.FormatResourceForCLIWithNamespace(obj))
+ default:
+ recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n"))
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
new file mode 100644
index 0000000000..c32c330bcb
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
@@ -0,0 +1,371 @@
+package resourceapply
+
+import (
+ "context"
+ "fmt"
+
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ corev1 "k8s.io/api/core/v1"
+ policyv1 "k8s.io/api/policy/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ storagev1 "k8s.io/api/storage/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourceread"
+ "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+type AssetFunc func(name string) ([]byte, error)
+
+type ApplyResult struct {
+ File string
+ Type string
+ Result runtime.Object
+ Changed bool
+ Error error
+}
+
+// ConditionalFunction provides needed dependency for a resource on another condition instead of blindly creating
+// a resource. This conditional function can also be used to delete the resource when not needed
+type ConditionalFunction func() bool
+
+type ClientHolder struct {
+ kubeClient kubernetes.Interface
+ apiExtensionsClient apiextensionsclient.Interface
+ kubeInformers v1helpers.KubeInformersForNamespaces
+ dynamicClient dynamic.Interface
+ migrationClient migrationclient.Interface
+}
+
+func NewClientHolder() *ClientHolder {
+ return &ClientHolder{}
+}
+
+func NewKubeClientHolder(client kubernetes.Interface) *ClientHolder {
+ return NewClientHolder().WithKubernetes(client)
+}
+
+func (c *ClientHolder) WithKubernetes(client kubernetes.Interface) *ClientHolder {
+ c.kubeClient = client
+ return c
+}
+
+func (c *ClientHolder) WithKubernetesInformers(kubeInformers v1helpers.KubeInformersForNamespaces) *ClientHolder {
+ c.kubeInformers = kubeInformers
+ return c
+}
+
+func (c *ClientHolder) WithAPIExtensionsClient(client apiextensionsclient.Interface) *ClientHolder {
+ c.apiExtensionsClient = client
+ return c
+}
+
+func (c *ClientHolder) WithDynamicClient(client dynamic.Interface) *ClientHolder {
+ c.dynamicClient = client
+ return c
+}
+
+func (c *ClientHolder) WithMigrationClient(client migrationclient.Interface) *ClientHolder {
+ c.migrationClient = client
+ return c
+}
+
+// ApplyDirectly applies the given manifest files to API server.
+func ApplyDirectly(ctx context.Context, clients *ClientHolder, recorder events.Recorder, cache ResourceCache, manifests AssetFunc, files ...string) []ApplyResult {
+ ret := []ApplyResult{}
+
+ for _, file := range files {
+ result := ApplyResult{File: file}
+ objBytes, err := manifests(file)
+ if err != nil {
+ result.Error = fmt.Errorf("missing %q: %v", file, err)
+ ret = append(ret, result)
+ continue
+ }
+ requiredObj, err := resourceread.ReadGenericWithUnstructured(objBytes)
+ if err != nil {
+ result.Error = fmt.Errorf("cannot decode %q: %v", file, err)
+ ret = append(ret, result)
+ continue
+ }
+ result.Type = fmt.Sprintf("%T", requiredObj)
+
+ // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems.
+ switch t := requiredObj.(type) {
+ case *corev1.Namespace:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyNamespaceImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache)
+ }
+ case *corev1.Service:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyServiceImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache)
+ }
+ case *corev1.Pod:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyPodImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache)
+ }
+ case *corev1.ServiceAccount:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyServiceAccountImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache)
+ }
+ case *corev1.ConfigMap:
+ client := clients.configMapsGetter()
+ if client == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyConfigMapImproved(ctx, client, recorder, t, cache)
+ }
+ case *corev1.Secret:
+ client := clients.secretsGetter()
+ if client == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplySecretImproved(ctx, client, recorder, t, cache)
+ }
+ case *rbacv1.ClusterRole:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyClusterRole(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.ClusterRoleBinding:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyClusterRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.Role:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyRole(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.RoleBinding:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *policyv1.PodDisruptionBudget:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyPodDisruptionBudget(ctx, clients.kubeClient.PolicyV1(), recorder, t)
+ }
+ case *apiextensionsv1.CustomResourceDefinition:
+ if clients.apiExtensionsClient == nil {
+ result.Error = fmt.Errorf("missing apiExtensionsClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyCustomResourceDefinitionV1(ctx, clients.apiExtensionsClient.ApiextensionsV1(), recorder, t)
+ }
+ case *storagev1.StorageClass:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyStorageClass(ctx, clients.kubeClient.StorageV1(), recorder, t)
+ }
+ case *admissionregistrationv1.ValidatingWebhookConfiguration:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyValidatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache)
+ }
+ case *admissionregistrationv1.MutatingWebhookConfiguration:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyMutatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache)
+ }
+ case *storagev1.CSIDriver:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyCSIDriver(ctx, clients.kubeClient.StorageV1(), recorder, t)
+ }
+ case *migrationv1alpha1.StorageVersionMigration:
+ if clients.migrationClient == nil {
+ result.Error = fmt.Errorf("missing migrationClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyStorageVersionMigration(ctx, clients.migrationClient, recorder, t)
+ }
+ case *unstructured.Unstructured:
+ if clients.dynamicClient == nil {
+ result.Error = fmt.Errorf("missing dynamicClient")
+ } else {
+ result.Result, result.Changed, result.Error = ApplyKnownUnstructured(ctx, clients.dynamicClient, recorder, t)
+ }
+ default:
+ result.Error = fmt.Errorf("unhandled type %T", requiredObj)
+ }
+
+ ret = append(ret, result)
+ }
+
+ return ret
+}
+
+func DeleteAll(ctx context.Context, clients *ClientHolder, recorder events.Recorder, manifests AssetFunc,
+ files ...string) []ApplyResult {
+ ret := []ApplyResult{}
+
+ for _, file := range files {
+ result := ApplyResult{File: file}
+ objBytes, err := manifests(file)
+ if err != nil {
+ result.Error = fmt.Errorf("missing %q: %v", file, err)
+ ret = append(ret, result)
+ continue
+ }
+ requiredObj, err := resourceread.ReadGenericWithUnstructured(objBytes)
+ if err != nil {
+ result.Error = fmt.Errorf("cannot decode %q: %v", file, err)
+ ret = append(ret, result)
+ continue
+ }
+ result.Type = fmt.Sprintf("%T", requiredObj)
+ // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems.
+ switch t := requiredObj.(type) {
+ case *corev1.Namespace:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteNamespace(ctx, clients.kubeClient.CoreV1(), recorder, t)
+ }
+ case *corev1.Service:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteService(ctx, clients.kubeClient.CoreV1(), recorder, t)
+ }
+ case *corev1.Pod:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeletePod(ctx, clients.kubeClient.CoreV1(), recorder, t)
+ }
+ case *corev1.ServiceAccount:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteServiceAccount(ctx, clients.kubeClient.CoreV1(), recorder, t)
+ }
+ case *corev1.ConfigMap:
+ client := clients.configMapsGetter()
+ if client == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteConfigMap(ctx, client, recorder, t)
+ }
+ case *corev1.Secret:
+ client := clients.secretsGetter()
+ if client == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteSecret(ctx, client, recorder, t)
+ }
+ case *rbacv1.ClusterRole:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteClusterRole(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.ClusterRoleBinding:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteClusterRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.Role:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteRole(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *rbacv1.RoleBinding:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t)
+ }
+ case *policyv1.PodDisruptionBudget:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeletePodDisruptionBudget(ctx, clients.kubeClient.PolicyV1(), recorder, t)
+ }
+ case *apiextensionsv1.CustomResourceDefinition:
+ if clients.apiExtensionsClient == nil {
+ result.Error = fmt.Errorf("missing apiExtensionsClient")
+ } else {
+ _, result.Changed, result.Error = DeleteCustomResourceDefinitionV1(ctx, clients.apiExtensionsClient.ApiextensionsV1(), recorder, t)
+ }
+ case *storagev1.StorageClass:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteStorageClass(ctx, clients.kubeClient.StorageV1(), recorder, t)
+ }
+ case *storagev1.CSIDriver:
+ if clients.kubeClient == nil {
+ result.Error = fmt.Errorf("missing kubeClient")
+ } else {
+ _, result.Changed, result.Error = DeleteCSIDriver(ctx, clients.kubeClient.StorageV1(), recorder, t)
+ }
+ case *migrationv1alpha1.StorageVersionMigration:
+ if clients.migrationClient == nil {
+ result.Error = fmt.Errorf("missing migrationClient")
+ } else {
+ _, result.Changed, result.Error = DeleteStorageVersionMigration(ctx, clients.migrationClient, recorder, t)
+ }
+ case *unstructured.Unstructured:
+ if clients.dynamicClient == nil {
+ result.Error = fmt.Errorf("missing dynamicClient")
+ } else {
+ _, result.Changed, result.Error = DeleteKnownUnstructured(ctx, clients.dynamicClient, recorder, t)
+ }
+ default:
+ result.Error = fmt.Errorf("unhandled type %T", requiredObj)
+ }
+
+ ret = append(ret, result)
+ }
+
+ return ret
+}
+
+func (c *ClientHolder) configMapsGetter() corev1client.ConfigMapsGetter {
+ if c.kubeClient == nil {
+ return nil
+ }
+ if c.kubeInformers == nil {
+ return c.kubeClient.CoreV1()
+ }
+ return v1helpers.CachedConfigMapGetter(c.kubeClient.CoreV1(), c.kubeInformers)
+}
+
+func (c *ClientHolder) secretsGetter() corev1client.SecretsGetter {
+ if c.kubeClient == nil {
+ return nil
+ }
+ if c.kubeInformers == nil {
+ return c.kubeClient.CoreV1()
+ }
+ return v1helpers.CachedSecretGetter(c.kubeClient.CoreV1(), c.kubeInformers)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
new file mode 100644
index 0000000000..ac9699affe
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
@@ -0,0 +1,70 @@
+package resourceapply
+
+import (
+ "fmt"
+
+ patch "github.com/evanphx/json-patch"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// JSONPatchNoError generates a JSON patch between original and modified objects and return the JSON as a string.
+// Note:
+//
+// In case of error, the returned string will contain the error messages.
+func JSONPatchNoError(original, modified runtime.Object) string {
+ if original == nil {
+ return "original object is nil"
+ }
+ if modified == nil {
+ return "modified object is nil"
+ }
+ originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, original)
+ if err != nil {
+ return fmt.Sprintf("unable to decode original to JSON: %v", err)
+ }
+ modifiedJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, modified)
+ if err != nil {
+ return fmt.Sprintf("unable to decode modified to JSON: %v", err)
+ }
+ patchBytes, err := patch.CreateMergePatch(originalJSON, modifiedJSON)
+ if err != nil {
+ return fmt.Sprintf("unable to create JSON patch: %v", err)
+ }
+ return string(patchBytes)
+}
+
+// JSONPatchSecretNoError generates a JSON patch between original and modified secrets, hiding their data,
+// and returns the JSON as a string.
+//
+// Note:
+// In case of error, the returned string will contain the error messages.
+func JSONPatchSecretNoError(original, modified *corev1.Secret) string {
+ if original == nil {
+ return "original object is nil"
+ }
+ if modified == nil {
+ return "modified object is nil"
+ }
+
+ safeModified := modified.DeepCopy()
+ safeOriginal := original.DeepCopy()
+
+ for s := range safeOriginal.Data {
+ safeOriginal.Data[s] = []byte("OLD")
+ }
+ for s := range safeModified.Data {
+ if _, preoriginal := original.Data[s]; !preoriginal {
+ safeModified.Data[s] = []byte("NEW")
+ } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) {
+ safeModified.Data[s] = []byte("MODIFIED")
+ } else {
+ safeModified.Data[s] = []byte("OLD")
+ }
+ }
+
+ return JSONPatchNoError(safeOriginal, safeModified)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go
new file mode 100644
index 0000000000..d6df1f589f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go
@@ -0,0 +1,59 @@
+package resourceapply
+
+import (
+ "context"
+ "reflect"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ migrationclientv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset"
+)
+
+// ApplyStorageVersionMigration merges objectmeta and required data.
+func ApplyStorageVersionMigration(ctx context.Context, client migrationclientv1alpha1.Interface, recorder events.Recorder, required *migrationv1alpha1.StorageVersionMigration) (*migrationv1alpha1.StorageVersionMigration, bool, error) {
+ clientInterface := client.MigrationV1alpha1().StorageVersionMigrations()
+ existing, err := clientInterface.Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := clientInterface.Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*v1alpha1.StorageVersionMigration), metav1.CreateOptions{})
+ reportCreateEvent(recorder, requiredCopy, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ if !*modified && reflect.DeepEqual(existingCopy.Spec, required.Spec) {
+ return existingCopy, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("StorageVersionMigration %q changes: %v", required.Name, JSONPatchNoError(existing, required))
+ }
+
+ required.Spec.Resource.DeepCopyInto(&existingCopy.Spec.Resource)
+ actual, err := clientInterface.Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+func DeleteStorageVersionMigration(ctx context.Context, client migrationclientv1alpha1.Interface, recorder events.Recorder, required *migrationv1alpha1.StorageVersionMigration) (*migrationv1alpha1.StorageVersionMigration, bool, error) {
+ clientInterface := client.MigrationV1alpha1().StorageVersionMigrations()
+ err := clientInterface.Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go
new file mode 100644
index 0000000000..98ad5b0dfc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go
@@ -0,0 +1,168 @@
+package resourceapply
+
+import (
+ "context"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/klog/v2"
+)
+
+var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}
+
+func ensureGenericSpec(required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) (*unstructured.Unstructured, bool, error) {
+ requiredCopy := required.DeepCopy()
+ mimicDefaultingFn(requiredCopy)
+ requiredSpec, _, err := unstructured.NestedMap(requiredCopy.UnstructuredContent(), "spec")
+ if err != nil {
+ return nil, false, err
+ }
+ existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec")
+ if err != nil {
+ return nil, false, err
+ }
+
+ if equalityChecker.DeepEqual(existingSpec, requiredSpec) {
+ return existing, false, nil
+ }
+
+ existingCopy := existing.DeepCopy()
+ if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredSpec, "spec"); err != nil {
+ return nil, true, err
+ }
+
+ return existingCopy, true, nil
+}
+
+// mimicDefaultingFunc is used to set fields that are defaulted. This allows for sparse manifests to apply correctly.
+// For instance, if field .spec.foo is set to 10 if not set, then a function of this type could be used to set
+// the field to 10 to match the comparison. This is sometimes (often?) easier than updating the semantic equality.
+// We often see this in places like RBAC and CRD. Logically it can happen generically too.
+type mimicDefaultingFunc func(obj *unstructured.Unstructured)
+
+func noDefaulting(obj *unstructured.Unstructured) {}
+
+// equalityChecker allows for custom equality comparisons. This can be used to allow equality checks to skip certain
+// operator managed fields. This capability allows something like .spec.scale to be specified or changed by a component
+// like HPA. Use this capability sparingly. Most places ought to just use `equality.Semantic`
+type equalityChecker interface {
+ DeepEqual(a1, a2 interface{}) bool
+}
+
+// ApplyServiceMonitor applies the Prometheus service monitor.
+func ApplyServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ namespace := required.GetNamespace()
+ existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{})
+ if errors.IsNotFound(err) {
+ newObj, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{})
+ if createErr != nil {
+ recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr)
+ return nil, true, createErr
+ }
+ recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing")
+ return newObj, true, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ existingCopy := existing.DeepCopy()
+
+ toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if !modified {
+ return nil, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate))
+ }
+
+ newObj, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
+ if err != nil {
+ recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err)
+ return nil, true, err
+ }
+
+ recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed")
+ return newObj, true, err
+}
+
+var prometheusRuleGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"}
+
+// ApplyPrometheusRule applies the PrometheusRule
+func ApplyPrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ namespace := required.GetNamespace()
+
+ existing, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{})
+ if errors.IsNotFound(err) {
+ newObj, createErr := client.Resource(prometheusRuleGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{})
+ if createErr != nil {
+ recorder.Warningf("PrometheusRuleCreateFailed", "Failed to create PrometheusRule.monitoring.coreos.com/v1: %v", createErr)
+ return nil, true, createErr
+ }
+ recorder.Eventf("PrometheusRuleCreated", "Created PrometheusRule.monitoring.coreos.com/v1 because it was missing")
+ return newObj, true, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ existingCopy := existing.DeepCopy()
+
+ toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if !modified {
+ return nil, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("PrometheusRule %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate))
+ }
+
+ newObj, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
+ if err != nil {
+ recorder.Warningf("PrometheusRuleUpdateFailed", "Failed to update PrometheusRule.monitoring.coreos.com/v1: %v", err)
+ return nil, true, err
+ }
+
+ recorder.Eventf("PrometheusRuleUpdated", "Updated PrometheusRule.monitoring.coreos.com/v1 because it changed")
+ return newObj, true, err
+}
+
+func DeletePrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ namespace := required.GetNamespace()
+ err := client.Resource(prometheusRuleGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{})
+ if err != nil && errors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+
+func DeleteServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ namespace := required.GetNamespace()
+ err := client.Resource(serviceMonitorGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{})
+ if err != nil && errors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go
new file mode 100644
index 0000000000..5bfe3b3896
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go
@@ -0,0 +1,60 @@
+package resourceapply
+
+import (
+ "context"
+
+ policyv1 "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ policyclientv1 "k8s.io/client-go/kubernetes/typed/policy/v1"
+ "k8s.io/klog/v2"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+func ApplyPodDisruptionBudget(ctx context.Context, client policyclientv1.PodDisruptionBudgetsGetter, recorder events.Recorder, required *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, bool, error) {
+ existing, err := client.PodDisruptionBudgets(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.PodDisruptionBudgets(required.Namespace).Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*policyv1.PodDisruptionBudget), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ contentSame := equality.Semantic.DeepEqual(existingCopy.Spec, required.Spec)
+ if contentSame && !*modified {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Spec = required.Spec
+
+ if klog.V(4).Enabled() {
+ klog.Infof("PodDisruptionBudget %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.PodDisruptionBudgets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+func DeletePodDisruptionBudget(ctx context.Context, client policyclientv1.PodDisruptionBudgetsGetter, recorder events.Recorder, required *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, bool, error) {
+ err := client.PodDisruptionBudgets(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
new file mode 100644
index 0000000000..0e378edd2d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
@@ -0,0 +1,246 @@
+package resourceapply
+
+import (
+ "context"
+ "fmt"
+
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+ "k8s.io/klog/v2"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// ApplyClusterRole merges objectmeta, requires rules, aggregation rules are not allowed for now.
+func ApplyClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+ if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 {
+ return nil, false, fmt.Errorf("cannot create an aggregated cluster role")
+ }
+
+ existing, err := client.ClusterRoles().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.ClusterRoles().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.ClusterRole), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+ if contentSame && !*modified {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Rules = required.Rules
+ existingCopy.AggregationRule = nil
+
+ if klog.V(4).Enabled() {
+ klog.Infof("ClusterRole %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.ClusterRoles().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+ existing, err := client.ClusterRoleBindings().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.ClusterRoleBindings().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.ClusterRoleBinding), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ requiredCopy := required.DeepCopy()
+
+ // Enforce apiGroup fields in roleRefs
+ existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+ for i := range existingCopy.Subjects {
+ if existingCopy.Subjects[i].Kind == "User" {
+ existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+ }
+ }
+
+ requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+ for i := range requiredCopy.Subjects {
+ if requiredCopy.Subjects[i].Kind == "User" {
+ requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+ }
+ }
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+ subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+ roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+ if subjectsAreSame && roleRefIsSame && !*modified {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Subjects = requiredCopy.Subjects
+ existingCopy.RoleRef = requiredCopy.RoleRef
+
+ if klog.V(4).Enabled() {
+ klog.Infof("ClusterRoleBinding %q changes: %v", requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.ClusterRoleBindings().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, requiredCopy, err)
+ return actual, true, err
+}
+
+// ApplyRole merges objectmeta, requires rules
+func ApplyRole(ctx context.Context, client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) {
+ existing, err := client.Roles(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.Roles(required.Namespace).Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.Role), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+ contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+ if contentSame && !*modified {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Rules = required.Rules
+
+ if klog.V(4).Enabled() {
+ klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, existingCopy))
+ }
+ actual, err := client.Roles(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+// ApplyRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+ existing, err := client.RoleBindings(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.RoleBindings(required.Namespace).Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.RoleBinding), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ requiredCopy := required.DeepCopy()
+
+ // Enforce apiGroup fields in roleRefs and subjects
+ existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+ for i := range existingCopy.Subjects {
+ if existingCopy.Subjects[i].Kind == "User" {
+ existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+ }
+ }
+
+ requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+ for i := range requiredCopy.Subjects {
+ if requiredCopy.Subjects[i].Kind == "User" {
+ requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+ }
+ }
+
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+ subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+ roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+ if subjectsAreSame && roleRefIsSame && !*modified {
+ return existingCopy, false, nil
+ }
+
+ existingCopy.Subjects = requiredCopy.Subjects
+ existingCopy.RoleRef = requiredCopy.RoleRef
+
+ if klog.V(4).Enabled() {
+ klog.Infof("RoleBinding %q changes: %v", requiredCopy.Namespace+"/"+requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ actual, err := client.RoleBindings(requiredCopy.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, requiredCopy, err)
+ return actual, true, err
+}
+
+func DeleteClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+ err := client.ClusterRoles().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+
+func DeleteClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+ err := client.ClusterRoleBindings().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+
+func DeleteRole(ctx context.Context, client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) {
+ err := client.Roles(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+
+func DeleteRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+ err := client.RoleBindings(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go
new file mode 100644
index 0000000000..daa1a5e154
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go
@@ -0,0 +1,168 @@
+package resourceapply
+
+import (
+ "crypto/md5"
+ "fmt"
+ "io"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/klog/v2"
+)
+
+type cachedVersionKey struct {
+ name string
+ namespace string
+ kind schema.GroupKind
+}
+
+// record of resource metadata used to determine if it's safe to return early from an ApplyFoo
+// resourceHash is an md5 hash of the required in an ApplyFoo that is computed in case the input changes
+// resourceVersion is the received resourceVersion from the apiserver in response to an update that is comparable to the GET
+type cachedResource struct {
+ resourceHash, resourceVersion string
+}
+
+type resourceCache struct {
+ cache map[cachedVersionKey]cachedResource
+}
+
+type ResourceCache interface {
+ UpdateCachedResourceMetadata(required runtime.Object, actual runtime.Object)
+ SafeToSkipApply(required runtime.Object, existing runtime.Object) bool
+}
+
+func NewResourceCache() *resourceCache {
+ return &resourceCache{
+ cache: map[cachedVersionKey]cachedResource{},
+ }
+}
+
+var noCache *resourceCache
+
+func getResourceMetadata(obj runtime.Object) (schema.GroupKind, string, string, string, error) {
+ if obj == nil {
+ return schema.GroupKind{}, "", "", "", fmt.Errorf("nil object has no metadata")
+ }
+ metadata, err := meta.Accessor(obj)
+ if err != nil {
+ return schema.GroupKind{}, "", "", "", err
+ }
+ if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+ return schema.GroupKind{}, "", "", "", fmt.Errorf("object has no metadata")
+ }
+ resourceHash := hashOfResourceStruct(obj)
+
+ // retrieve kind, sometimes this can be done via the accessor, sometimes not (depends on the type)
+ kind := schema.GroupKind{}
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if len(gvk.Kind) > 0 {
+ kind = gvk.GroupKind()
+ } else {
+ if currKind := getCoreGroupKind(obj); currKind != nil {
+ kind = *currKind
+ }
+ }
+ if len(kind.Kind) == 0 {
+ return schema.GroupKind{}, "", "", "", fmt.Errorf("unable to determine GroupKind of %T", obj)
+ }
+
+ return kind, metadata.GetName(), metadata.GetNamespace(), resourceHash, nil
+}
+
+func getResourceVersion(obj runtime.Object) (string, error) {
+ if obj == nil {
+ return "", fmt.Errorf("nil object has no resourceVersion")
+ }
+ metadata, err := meta.Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+ if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+ return "", fmt.Errorf("object has no metadata")
+ }
+ rv := metadata.GetResourceVersion()
+ if len(rv) == 0 {
+ return "", fmt.Errorf("missing resourceVersion")
+ }
+
+ return rv, nil
+}
+
+func (c *resourceCache) UpdateCachedResourceMetadata(required runtime.Object, actual runtime.Object) {
+ if c == nil || c.cache == nil {
+ return
+ }
+ if required == nil || actual == nil {
+ return
+ }
+ kind, name, namespace, resourceHash, err := getResourceMetadata(required)
+ if err != nil {
+ return
+ }
+ cacheKey := cachedVersionKey{
+ name: name,
+ namespace: namespace,
+ kind: kind,
+ }
+
+ resourceVersion, err := getResourceVersion(actual)
+ if err != nil {
+ klog.V(4).Infof("error reading resourceVersion %s:%s:%s %s", name, kind, namespace, err)
+ return
+ }
+
+ c.cache[cacheKey] = cachedResource{resourceHash, resourceVersion}
+ klog.V(7).Infof("updated resourceVersion of %s:%s:%s %s", name, kind, namespace, resourceVersion)
+}
+
+// in the circumstance that an ApplyFoo's 'required' is the same one which was previously
+// applied for a given (name, kind, namespace) and the existing resource (if any),
+// hasn't been modified since the ApplyFoo last updated that resource, then return true (we don't
+// need to reapply the resource). Otherwise return false.
+func (c *resourceCache) SafeToSkipApply(required runtime.Object, existing runtime.Object) bool {
+ if c == nil || c.cache == nil {
+ return false
+ }
+ if required == nil || existing == nil {
+ return false
+ }
+ kind, name, namespace, resourceHash, err := getResourceMetadata(required)
+ if err != nil {
+ return false
+ }
+ cacheKey := cachedVersionKey{
+ name: name,
+ namespace: namespace,
+ kind: kind,
+ }
+
+ resourceVersion, err := getResourceVersion(existing)
+ if err != nil {
+ return false
+ }
+
+ var versionMatch, hashMatch bool
+ if cached, exists := c.cache[cacheKey]; exists {
+ versionMatch = cached.resourceVersion == resourceVersion
+ hashMatch = cached.resourceHash == resourceHash
+ if versionMatch && hashMatch {
+ klog.V(4).Infof("found matching resourceVersion & manifest hash")
+ return true
+ }
+ }
+
+ return false
+}
+
+// detect changes in a resource by caching a hash of the string representation of the resource
+// note: some changes in a resource e.g. nil vs empty, will not be detected this way
+func hashOfResourceStruct(o interface{}) string {
+ oString := fmt.Sprintf("%v", o)
+ h := md5.New()
+ io.WriteString(h, oString)
+ rval := fmt.Sprintf("%x", h.Sum(nil))
+ return rval
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
new file mode 100644
index 0000000000..f9bbf0f1ad
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
@@ -0,0 +1,241 @@
+package resourceapply
+
+import (
+ "context"
+ "fmt"
+
+ storagev1 "k8s.io/api/storage/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ storageclientv1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+ "k8s.io/klog/v2"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+const (
+ // Label on the CSIDriver to declare the driver's effective pod security profile
+ csiInlineVolProfileLabel = "security.openshift.io/csi-ephemeral-volume-profile"
+)
+
+var (
+ // Exempt labels are not overwritten if the value has changed
+ exemptCSIDriverLabels = []string{
+ csiInlineVolProfileLabel,
+ }
+)
+
+// ApplyStorageClass merges objectmeta, tries to write everything else
+func ApplyStorageClass(ctx context.Context, client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool,
+ error) {
+ existing, err := client.StorageClasses().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.StorageClasses().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*storagev1.StorageClass), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ // First, let's compare ObjectMeta from both objects
+ modified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+ // Second, let's compare the other fields. StorageClass doesn't have a spec and we don't
+ // want to miss fields, so we have to copy required to get all fields
+ // and then overwrite ObjectMeta and TypeMeta from the original.
+ requiredCopy := required.DeepCopy()
+ requiredCopy.ObjectMeta = *existingCopy.ObjectMeta.DeepCopy()
+ requiredCopy.TypeMeta = existingCopy.TypeMeta
+
+ contentSame := equality.Semantic.DeepEqual(existingCopy, requiredCopy)
+ if contentSame && !*modified {
+ return existing, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("StorageClass %q changes: %v", required.Name, JSONPatchNoError(existingCopy, requiredCopy))
+ }
+
+ if storageClassNeedsRecreate(existingCopy, requiredCopy) {
+ requiredCopy.ObjectMeta.ResourceVersion = ""
+ err = client.StorageClasses().Delete(ctx, existingCopy.Name, metav1.DeleteOptions{})
+ reportDeleteEvent(recorder, requiredCopy, err, "Deleting StorageClass to re-create it with updated parameters")
+ if err != nil && !apierrors.IsNotFound(err) {
+ return existing, false, err
+ }
+ actual, err := client.StorageClasses().Create(ctx, requiredCopy, metav1.CreateOptions{})
+ if err != nil && apierrors.IsAlreadyExists(err) {
+ // Delete() few lines above did not really delete the object,
+ // the API server is probably waiting for a finalizer removal or so.
+ // Report an error, but something else than "Already exists", because
+ // that would be very confusing - Apply failed because the object
+ // already exists???
+ err = fmt.Errorf("failed to re-create StorageClass %s, waiting for the original object to be deleted", existingCopy.Name)
+ } else if err != nil {
+ err = fmt.Errorf("failed to re-create StorageClass %s: %s", existingCopy.Name, err)
+ }
+ reportCreateEvent(recorder, actual, err)
+ return actual, true, err
+ }
+
+ // Only mutable fields need a change
+ actual, err := client.StorageClasses().Update(ctx, requiredCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+}
+
+func storageClassNeedsRecreate(oldSC, newSC *storagev1.StorageClass) bool {
+ // Based on kubernetes/kubernetes/pkg/apis/storage/validation/validation.go,
+ // these fields are immutable.
+ if !equality.Semantic.DeepEqual(oldSC.Parameters, newSC.Parameters) {
+ return true
+ }
+ if oldSC.Provisioner != newSC.Provisioner {
+ return true
+ }
+
+ // In theory, ReclaimPolicy is always set, just in case:
+ if (oldSC.ReclaimPolicy == nil && newSC.ReclaimPolicy != nil) ||
+ (oldSC.ReclaimPolicy != nil && newSC.ReclaimPolicy == nil) {
+ return true
+ }
+ if oldSC.ReclaimPolicy != nil && newSC.ReclaimPolicy != nil && *oldSC.ReclaimPolicy != *newSC.ReclaimPolicy {
+ return true
+ }
+
+ if !equality.Semantic.DeepEqual(oldSC.VolumeBindingMode, newSC.VolumeBindingMode) {
+ return true
+ }
+ return false
+}
+
+// ApplyCSIDriver merges objectmeta, does not worry about anything else
+func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, requiredOriginal *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) {
+
+ required := requiredOriginal.DeepCopy()
+ if required.Annotations == nil {
+ required.Annotations = map[string]string{}
+ }
+ if err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec); err != nil {
+ return nil, false, err
+ }
+ if err := validateRequiredCSIDriverLabels(required); err != nil {
+ return nil, false, err
+ }
+
+ existing, err := client.CSIDrivers().Get(ctx, required.Name, metav1.GetOptions{})
+ if apierrors.IsNotFound(err) {
+ requiredCopy := required.DeepCopy()
+ actual, err := client.CSIDrivers().Create(
+ ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*storagev1.CSIDriver), metav1.CreateOptions{})
+ reportCreateEvent(recorder, required, err)
+ return actual, true, err
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ // Exempt labels are not overwritten if the value has changed. They get set
+ // once during creation, but the admin may choose to set a different value.
+ // If the label is removed, it reverts back to the default value.
+ for _, exemptLabel := range exemptCSIDriverLabels {
+ if existingValue, ok := existing.Labels[exemptLabel]; ok {
+ required.Labels[exemptLabel] = existingValue
+ }
+ }
+
+ metadataModified := resourcemerge.BoolPtr(false)
+ existingCopy := existing.DeepCopy()
+ resourcemerge.EnsureObjectMeta(metadataModified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+ requiredSpecHash := required.Annotations[specHashAnnotation]
+ existingSpecHash := existing.Annotations[specHashAnnotation]
+ sameSpec := requiredSpecHash == existingSpecHash
+ if sameSpec && !*metadataModified {
+ return existing, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("CSIDriver %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+ }
+
+ if sameSpec {
+ // Update metadata by a simple Update call
+ actual, err := client.CSIDrivers().Update(ctx, existingCopy, metav1.UpdateOptions{})
+ reportUpdateEvent(recorder, required, err)
+ return actual, true, err
+ }
+
+ existingCopy.Spec = required.Spec
+ existingCopy.ObjectMeta.ResourceVersion = ""
+ // Spec is read-only after creation. Delete and re-create the object
+ err = client.CSIDrivers().Delete(ctx, existingCopy.Name, metav1.DeleteOptions{})
+ reportDeleteEvent(recorder, existingCopy, err, "Deleting CSIDriver to re-create it with updated parameters")
+ if err != nil && !apierrors.IsNotFound(err) {
+ return existing, false, err
+ }
+ actual, err := client.CSIDrivers().Create(ctx, existingCopy, metav1.CreateOptions{})
+ if err != nil && apierrors.IsAlreadyExists(err) {
+ // Delete() few lines above did not really delete the object,
+ // the API server is probably waiting for a finalizer removal or so.
+ // Report an error, but something else than "Already exists", because
+ // that would be very confusing - Apply failed because the object
+ // already exists???
+ err = fmt.Errorf("failed to re-create CSIDriver object %s, waiting for the original object to be deleted", existingCopy.Name)
+ } else if err != nil {
+ err = fmt.Errorf("failed to re-create CSIDriver %s: %s", existingCopy.Name, err)
+ }
+ reportCreateEvent(recorder, existingCopy, err)
+ return actual, true, err
+}
+
+func validateRequiredCSIDriverLabels(required *storagev1.CSIDriver) error {
+ supportsEphemeralVolumes := false
+ for _, mode := range required.Spec.VolumeLifecycleModes {
+ if mode == storagev1.VolumeLifecycleEphemeral {
+ supportsEphemeralVolumes = true
+ break
+ }
+ }
+ // All OCP managed CSI drivers that support the Ephemeral volume
+ // lifecycle mode must provide a profile label to be matched against
+ // the pod security policy for the namespace of the pod.
+ // Valid values are: restricted, baseline, privileged.
+ _, labelFound := required.Labels[csiInlineVolProfileLabel]
+ if supportsEphemeralVolumes && !labelFound {
+ return fmt.Errorf("CSIDriver %s supports Ephemeral volume lifecycle but is missing required label %s", required.Name, csiInlineVolProfileLabel)
+ }
+ return nil
+}
+
+func DeleteStorageClass(ctx context.Context, client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool,
+ error) {
+ err := client.StorageClasses().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
+
+func DeleteCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, required *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) {
+ err := client.CSIDrivers().Delete(ctx, required.Name, metav1.DeleteOptions{})
+ if err != nil && apierrors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go
new file mode 100644
index 0000000000..1adb01aeeb
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go
@@ -0,0 +1,42 @@
+package resourceapply
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+)
+
+// ApplyKnownUnstructured applies a few selected Unstructured types, where it has semantic knowledge
+// to merge existing & required objects intelligently. Feel free to add more.
+func ApplyKnownUnstructured(ctx context.Context, client dynamic.Interface, recorder events.Recorder, obj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ switch obj.GetObjectKind().GroupVersionKind().GroupKind() {
+ case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "ServiceMonitor"}:
+ return ApplyServiceMonitor(ctx, client, recorder, obj)
+ case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "PrometheusRule"}:
+ return ApplyPrometheusRule(ctx, client, recorder, obj)
+ case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}:
+ return ApplyVolumeSnapshotClass(ctx, client, recorder, obj)
+
+ }
+
+ return nil, false, fmt.Errorf("unsupported object type: %s", obj.GetKind())
+}
+
+// DeleteKnownUnstructured deletes a few selected Unstructured types
+func DeleteKnownUnstructured(ctx context.Context, client dynamic.Interface, recorder events.Recorder, obj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ switch obj.GetObjectKind().GroupVersionKind().GroupKind() {
+ case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "ServiceMonitor"}:
+ return DeleteServiceMonitor(ctx, client, recorder, obj)
+ case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "PrometheusRule"}:
+ return DeletePrometheusRule(ctx, client, recorder, obj)
+ case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}:
+ return DeleteVolumeSnapshotClass(ctx, client, recorder, obj)
+
+ }
+
+ return nil, false, fmt.Errorf("unsupported object type: %s", obj.GetKind())
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go
new file mode 100644
index 0000000000..1a35b6d770
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go
@@ -0,0 +1,129 @@
+package resourceapply
+
+import (
+ "context"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+
+ "github.com/openshift/library-go/pkg/operator/events"
+)
+
+const (
+ VolumeSnapshotClassGroup = "snapshot.storage.k8s.io"
+ VolumeSnapshotClassVersion = "v1"
+ VolumeSnapshotClassResource = "volumesnapshotclasses"
+)
+
+var volumeSnapshotClassResourceGVR schema.GroupVersionResource = schema.GroupVersionResource{
+ Group: VolumeSnapshotClassGroup,
+ Version: VolumeSnapshotClassVersion,
+ Resource: VolumeSnapshotClassResource,
+}
+
+func ensureGenericVolumeSnapshotClass(required, existing *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ var existingCopy *unstructured.Unstructured
+
+ // Apply "parameters"
+ requiredParameters, _, err := unstructured.NestedMap(required.UnstructuredContent(), "parameters")
+ if err != nil {
+ return nil, false, err
+ }
+ existingParameters, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "parameters")
+ if err != nil {
+ return nil, false, err
+ }
+ if !equality.Semantic.DeepEqual(existingParameters, requiredParameters) {
+ if existingCopy == nil {
+ existingCopy = existing.DeepCopy()
+ }
+ if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredParameters, "parameters"); err != nil {
+ return nil, true, err
+ }
+ }
+
+ // Apply "driver" and "deletionPolicy"
+ for _, fieldName := range []string{"driver", "deletionPolicy"} {
+ requiredField, _, err := unstructured.NestedString(required.UnstructuredContent(), fieldName)
+ if err != nil {
+ return nil, false, err
+ }
+ existingField, _, err := unstructured.NestedString(existing.UnstructuredContent(), fieldName)
+ if err != nil {
+ return nil, false, err
+ }
+ if requiredField != existingField {
+ if existingCopy == nil {
+ existingCopy = existing.DeepCopy()
+ }
+ if err := unstructured.SetNestedField(existingCopy.UnstructuredContent(), requiredField, fieldName); err != nil {
+ return nil, true, err
+ }
+ }
+ }
+
+ // If existingCopy is not nil, then the object has been modified
+ if existingCopy != nil {
+ return existingCopy, true, nil
+ }
+
+ return existing, false, nil
+}
+
+// ApplyVolumeSnapshotClass applies Volume Snapshot Class.
+func ApplyVolumeSnapshotClass(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ existing, err := client.Resource(volumeSnapshotClassResourceGVR).Get(ctx, required.GetName(), metav1.GetOptions{})
+ if errors.IsNotFound(err) {
+ newObj, createErr := client.Resource(volumeSnapshotClassResourceGVR).Create(ctx, required, metav1.CreateOptions{})
+ if createErr != nil {
+ recorder.Warningf("VolumeSnapshotClassCreateFailed", "Failed to create VolumeSnapshotClass.snapshot.storage.k8s.io/v1: %v", createErr)
+ return nil, true, createErr
+ }
+ recorder.Eventf("VolumeSnapshotClassCreated", "Created VolumeSnapshotClass.snapshot.storage.k8s.io/v1 because it was missing")
+ return newObj, true, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+
+ toUpdate, modified, err := ensureGenericVolumeSnapshotClass(required, existing)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if !modified {
+ return existing, false, nil
+ }
+
+ if klog.V(4).Enabled() {
+ klog.Infof("VolumeSnapshotClass %q changes: %v", required.GetName(), JSONPatchNoError(existing, toUpdate))
+ }
+
+ newObj, err := client.Resource(volumeSnapshotClassResourceGVR).Update(ctx, toUpdate, metav1.UpdateOptions{})
+ if err != nil {
+ recorder.Warningf("VolumeSnapshotClassFailed", "Failed to update VolumeSnapshotClass.snapshot.storage.k8s.io/v1: %v", err)
+ return nil, true, err
+ }
+
+ recorder.Eventf("VolumeSnapshotClassUpdated", "Updated VolumeSnapshotClass.snapshot.storage.k8s.io/v1 because it changed")
+ return newObj, true, err
+}
+
+func DeleteVolumeSnapshotClass(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+ namespace := required.GetNamespace()
+ err := client.Resource(volumeSnapshotClassResourceGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{})
+ if err != nil && errors.IsNotFound(err) {
+ return nil, false, nil
+ }
+ if err != nil {
+ return nil, false, err
+ }
+ reportDeleteEvent(recorder, required, err)
+ return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go
new file mode 100644
index 0000000000..43ea9111c6
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go
@@ -0,0 +1,76 @@
+package resourcehelper
+
+import (
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ "github.com/openshift/api"
+)
+
+var (
+ openshiftScheme = runtime.NewScheme()
+)
+
+func init() {
+ if err := api.Install(openshiftScheme); err != nil {
+ panic(err)
+ }
+}
+
+// FormatResourceForCLIWithNamespace generates a string that can be copy/pasted for use with oc get that includes
+// specifying the namespace with the -n option (e.g., `ConfigMap/cluster-config-v1 -n kube-system`).
+func FormatResourceForCLIWithNamespace(obj runtime.Object) string {
+ gvk := GuessObjectGroupVersionKind(obj)
+ kind := gvk.Kind
+ group := gvk.Group
+ var name, namespace string
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ name = ""
+ namespace = ""
+ } else {
+ name = accessor.GetName()
+ namespace = accessor.GetNamespace()
+ }
+ if len(group) > 0 {
+ group = "." + group
+ }
+ if len(namespace) > 0 {
+ namespace = " -n " + namespace
+ }
+ return kind + group + "/" + name + namespace
+}
+
+// FormatResourceForCLI generates a string that can be copy/pasted for use with oc get.
+func FormatResourceForCLI(obj runtime.Object) string {
+ gvk := GuessObjectGroupVersionKind(obj)
+ kind := gvk.Kind
+ group := gvk.Group
+ var name string
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ name = ""
+ } else {
+ name = accessor.GetName()
+ }
+ if len(group) > 0 {
+ group = "." + group
+ }
+ return kind + group + "/" + name
+}
+
+// GuessObjectGroupVersionKind returns a human readable for the passed runtime object.
+func GuessObjectGroupVersionKind(object runtime.Object) schema.GroupVersionKind {
+ if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 {
+ return gvk
+ }
+ if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 {
+ return kinds[0]
+ }
+ if kinds, _, _ := openshiftScheme.ObjectKinds(object); len(kinds) > 0 {
+ return kinds[0]
+ }
+ return schema.GroupVersionKind{Kind: ""}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go
new file mode 100644
index 0000000000..2fcfd13949
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go
@@ -0,0 +1,51 @@
+package resourcemerge
+
+import (
+ operatorsv1 "github.com/openshift/api/operator/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// ExpectedMutatingWebhooksConfiguration returns last applied generation for MutatingWebhookConfiguration resource registered in operator
+func ExpectedMutatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 {
+ generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: "mutatingwebhookconfigurations"}, "", name)
+ if generation != nil {
+ return generation.LastGeneration
+ }
+ return -1
+}
+
+// SetMutatingWebhooksConfigurationGeneration updates operator generation status list with last applied generation for provided MutatingWebhookConfiguration resource
+func SetMutatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.MutatingWebhookConfiguration) {
+ if actual == nil {
+ return
+ }
+ SetGeneration(generations, operatorsv1.GenerationStatus{
+ Group: admissionregistrationv1.SchemeGroupVersion.Group,
+ Resource: "mutatingwebhookconfigurations",
+ Name: actual.Name,
+ LastGeneration: actual.ObjectMeta.Generation,
+ })
+}
+
+// ExpectedValidatingWebhooksConfiguration returns last applied generation for ValidatingWebhookConfiguration resource registered in operator
+func ExpectedValidatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 {
+ generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: "validatingwebhookconfigurations"}, "", name)
+ if generation != nil {
+ return generation.LastGeneration
+ }
+ return -1
+}
+
+// SetValidatingWebhooksConfigurationGeneration updates operator generation status list with last applied generation for provided ValidatingWebhookConfiguration resource
+func SetValidatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.ValidatingWebhookConfiguration) {
+ if actual == nil {
+ return
+ }
+ SetGeneration(generations, operatorsv1.GenerationStatus{
+ Group: admissionregistrationv1.SchemeGroupVersion.Group,
+ Resource: "validatingwebhookconfigurations",
+ Name: actual.Name,
+ LastGeneration: actual.ObjectMeta.Generation,
+ })
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go
new file mode 100644
index 0000000000..754a5aabe8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go
@@ -0,0 +1,68 @@
+package resourcemerge
+
+import (
+ "strings"
+
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ utilpointer "k8s.io/utils/pointer"
+)
+
+// EnsureCustomResourceDefinitionV1Beta1 ensures that the existing matches the required.
+// modified is set to true when existing had to be updated with required.
+func EnsureCustomResourceDefinitionV1Beta1(modified *bool, existing *apiextensionsv1beta1.CustomResourceDefinition, required apiextensionsv1beta1.CustomResourceDefinition) {
+ EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+
+ // we stomp everything
+ if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) {
+ *modified = true
+ existing.Spec = required.Spec
+ }
+}
+
+// EnsureCustomResourceDefinitionV1 ensures that the existing matches the required.
+// modified is set to true when existing had to be updated with required.
+func EnsureCustomResourceDefinitionV1(modified *bool, existing *apiextensionsv1.CustomResourceDefinition, required apiextensionsv1.CustomResourceDefinition) {
+ EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+
+ // we need to match defaults
+ mimicCRDV1Defaulting(&required)
+ // we stomp everything
+ if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) {
+ *modified = true
+ existing.Spec = required.Spec
+ }
+}
+
+func mimicCRDV1Defaulting(required *apiextensionsv1.CustomResourceDefinition) {
+ crd_SetDefaults_CustomResourceDefinitionSpec(&required.Spec)
+
+ if required.Spec.Conversion != nil &&
+ required.Spec.Conversion.Webhook != nil &&
+ required.Spec.Conversion.Webhook.ClientConfig != nil &&
+ required.Spec.Conversion.Webhook.ClientConfig.Service != nil {
+ crd_SetDefaults_ServiceReference(required.Spec.Conversion.Webhook.ClientConfig.Service)
+ }
+}
+
+// lifted from https://github.com/kubernetes/kubernetes/blob/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go#L42-L61
+func crd_SetDefaults_CustomResourceDefinitionSpec(obj *apiextensionsv1.CustomResourceDefinitionSpec) {
+ if len(obj.Names.Singular) == 0 {
+ obj.Names.Singular = strings.ToLower(obj.Names.Kind)
+ }
+ if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 {
+ obj.Names.ListKind = obj.Names.Kind + "List"
+ }
+ if obj.Conversion == nil {
+ obj.Conversion = &apiextensionsv1.CustomResourceConversion{
+ Strategy: apiextensionsv1.NoneConverter,
+ }
+ }
+}
+
+func crd_SetDefaults_ServiceReference(obj *apiextensionsv1.ServiceReference) {
+ if obj.Port == nil {
+ obj.Port = utilpointer.Int32Ptr(443)
+ }
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go
new file mode 100644
index 0000000000..1731382e68
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go
@@ -0,0 +1,80 @@
+package resourcemerge
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ operatorsv1 "github.com/openshift/api/operator/v1"
+)
+
+func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus {
+ for i := range generations {
+ curr := &generations[i]
+ if curr.Namespace == namespace &&
+ curr.Name == name &&
+ curr.Group == resource.Group &&
+ curr.Resource == resource.Resource {
+
+ return curr
+ }
+ }
+
+ return nil
+}
+
+func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) {
+ if generations == nil {
+ generations = &[]operatorsv1.GenerationStatus{}
+ }
+
+ existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name)
+ if existingGeneration == nil {
+ *generations = append(*generations, newGeneration)
+ return
+ }
+
+ existingGeneration.LastGeneration = newGeneration.LastGeneration
+ existingGeneration.Hash = newGeneration.Hash
+}
+
+func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 {
+ generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name)
+ if generation != nil {
+ return generation.LastGeneration
+ }
+ return -1
+}
+
+func SetDeploymentGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) {
+ if actual == nil {
+ return
+ }
+ SetGeneration(generations, operatorsv1.GenerationStatus{
+ Group: "apps",
+ Resource: "deployments",
+ Namespace: actual.Namespace,
+ Name: actual.Name,
+ LastGeneration: actual.ObjectMeta.Generation,
+ })
+}
+
+func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 {
+ generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name)
+ if generation != nil {
+ return generation.LastGeneration
+ }
+ return -1
+}
+
+func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) {
+ if actual == nil {
+ return
+ }
+ SetGeneration(generations, operatorsv1.GenerationStatus{
+ Group: "apps",
+ Resource: "daemonsets",
+ Namespace: actual.Namespace,
+ Name: actual.Name,
+ LastGeneration: actual.ObjectMeta.Generation,
+ })
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go
new file mode 100644
index 0000000000..f1e6d0c9fd
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go
@@ -0,0 +1,271 @@
+package resourcemerge
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/yaml"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ kyaml "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+// MergeConfigMap takes a configmap, the target key, special overlay funcs and a list of config yamls to overlay on top of each other
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap
+func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+ return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...)
+}
+
+// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs and a list of config yamls to overlay on top of each other
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap.
+// It roundtrips the config through the given schema.
+func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+ configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if reflect.DeepEqual(configMap.Data[configKey], configBytes) {
+ return configMap, false, nil
+ }
+
+ ret := configMap.DeepCopy()
+ ret.Data[configKey] = string(configBytes)
+
+ return ret, true, nil
+}
+
+// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous
+func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) {
+ currentConfigYAML := configYAMLs[0]
+
+ for _, currConfigYAML := range configYAMLs[1:] {
+ prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML)
+ if err != nil {
+ klog.Warning(err)
+ // maybe it's just json
+ prevConfigJSON = currentConfigYAML
+ }
+ prevConfig := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil {
+ return nil, err
+ }
+
+ if len(currConfigYAML) > 0 {
+ currConfigJSON, err := kyaml.ToJSON(currConfigYAML)
+ if err != nil {
+ klog.Warning(err)
+ // maybe it's just json
+ currConfigJSON = currConfigYAML
+ }
+ currConfig := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil {
+ return nil, err
+ }
+
+ // protected against mismatched typemeta
+ prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion")
+ prevKind, _, _ := unstructured.NestedString(prevConfig, "kind")
+ currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion")
+ currKind, _, _ := unstructured.NestedString(currConfig, "kind")
+ currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0
+ gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind
+ if currGVKSet && gvkMismatched {
+ return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind)
+ }
+
+ if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil {
+ return nil, err
+ }
+ }
+
+ currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return currentConfigYAML, nil
+}
+
+// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous.
+// The result is roundtripped through the given schema if it is non-nil.
+func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) {
+ bs, err := MergeProcessConfig(specialCases, configYAMLs...)
+ if err != nil {
+ return nil, err
+ }
+
+ if schema == nil {
+ return bs, nil
+ }
+
+ // roundtrip through the schema
+ typed := schema.DeepCopyObject()
+ if err := yaml.Unmarshal(bs, typed); err != nil {
+ return nil, err
+ }
+ typedBytes, err := json.Marshal(typed)
+ if err != nil {
+ return nil, err
+ }
+ var untypedJSON map[string]interface{}
+ if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil {
+ return nil, err
+ }
+
+ // and intersect output with input because we cannot rely on omitempty in the schema
+ inputBytes, err := yaml.YAMLToJSON(bs)
+ if err != nil {
+ return nil, err
+ }
+ var inputJSON map[string]interface{}
+ if err := json.Unmarshal(inputBytes, &inputJSON); err != nil {
+ return nil, err
+ }
+ return json.Marshal(intersectJSON(inputJSON, untypedJSON))
+}
+
+type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error)
+
+var _ MergeFunc = RemoveConfig
+
+// RemoveConfig is a merge func that eliminates an entire path from the config
+func RemoveConfig(dst, src interface{}, currentPath string) (interface{}, error) {
+ return dst, nil
+}
+
+// mergeConfig overwrites entries in curr by additional. It modifies curr.
+func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error {
+ for additionalKey, additionalVal := range additional {
+ fullKey := currentPath + "." + additionalKey
+ specialCase, ok := specialCases[fullKey]
+ if ok {
+ var err error
+ curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ currVal, ok := curr[additionalKey]
+ if !ok {
+ curr[additionalKey] = additionalVal
+ continue
+ }
+
+ // only some scalars are accepted
+ switch castVal := additionalVal.(type) {
+ case map[string]interface{}:
+ currValAsMap, ok := currVal.(map[string]interface{})
+ if !ok {
+ currValAsMap = map[string]interface{}{}
+ curr[additionalKey] = currValAsMap
+ }
+
+ err := mergeConfig(currValAsMap, castVal, fullKey, specialCases)
+ if err != nil {
+ return err
+ }
+ continue
+
+ default:
+ if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil {
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// intersectJSON returns the intersection of both JSON objects,
+// preferring the values of the first argument.
+func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} {
+ if x1 == nil || x2 == nil {
+ return nil
+ }
+ ret := map[string]interface{}{}
+ for k, v1 := range x1 {
+ v2, ok := x2[k]
+ if !ok {
+ continue
+ }
+ ret[k] = intersectValue(v1, v2)
+ }
+ return ret
+}
+
+func intersectArray(x1, x2 []interface{}) []interface{} {
+ if x1 == nil || x2 == nil {
+ return nil
+ }
+ ret := make([]interface{}, 0, len(x1))
+ for i := range x1 {
+ if i >= len(x2) {
+ break
+ }
+ ret = append(ret, intersectValue(x1[i], x2[i]))
+ }
+ return ret
+}
+
+func intersectValue(x1, x2 interface{}) interface{} {
+ switch x1 := x1.(type) {
+ case map[string]interface{}:
+ x2, ok := x2.(map[string]interface{})
+ if !ok {
+ return x1
+ }
+ return intersectJSON(x1, x2)
+ case []interface{}:
+ x2, ok := x2.([]interface{})
+ if !ok {
+ return x1
+ }
+ return intersectArray(x1, x2)
+ default:
+ return x1
+ }
+}
+
+// IsRequiredConfigPresent can check an observedConfig to see if certain required paths are present in that config.
+// This allows operators to require certain configuration to be observed before proceeding to honor a configuration or roll it out.
+func IsRequiredConfigPresent(config []byte, requiredPaths [][]string) error {
+ if len(config) == 0 {
+ return fmt.Errorf("no observedConfig")
+ }
+
+ existingConfig := map[string]interface{}{}
+ if err := json.NewDecoder(bytes.NewBuffer(config)).Decode(&existingConfig); err != nil {
+ return fmt.Errorf("error parsing config, %v", err)
+ }
+
+ for _, requiredPath := range requiredPaths {
+ configVal, found, err := unstructured.NestedFieldNoCopy(existingConfig, requiredPath...)
+ if err != nil {
+ return fmt.Errorf("error reading %v from config, %v", strings.Join(requiredPath, "."), err)
+ }
+ if !found {
+ return fmt.Errorf("%v missing from config", strings.Join(requiredPath, "."))
+ }
+ if configVal == nil {
+ return fmt.Errorf("%v null in config", strings.Join(requiredPath, "."))
+ }
+ if configValSlice, ok := configVal.([]interface{}); ok && len(configValSlice) == 0 {
+ return fmt.Errorf("%v empty in config", strings.Join(requiredPath, "."))
+ }
+ if configValString, ok := configVal.(string); ok && len(configValString) == 0 {
+ return fmt.Errorf("%v empty in config", strings.Join(requiredPath, "."))
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go
new file mode 100644
index 0000000000..4881c4b8a8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go
@@ -0,0 +1,277 @@
+package resourcemerge
+
+import (
+ "reflect"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here.
+// TODO finalizer support maybe?
+func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) {
+ SetStringIfSet(modified, &existing.Namespace, required.Namespace)
+ SetStringIfSet(modified, &existing.Name, required.Name)
+ MergeMap(modified, &existing.Labels, required.Labels)
+ MergeMap(modified, &existing.Annotations, required.Annotations)
+ MergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences)
+}
+
+// WithCleanLabelsAndAnnotations cleans the metadata off the removal annotations/labels/ownerrefs
+// (those that end with trailing "-")
+func WithCleanLabelsAndAnnotations(obj metav1.Object) metav1.Object {
+ obj.SetAnnotations(cleanRemovalKeys(obj.GetAnnotations()))
+ obj.SetLabels(cleanRemovalKeys(obj.GetLabels()))
+ obj.SetOwnerReferences(cleanRemovalOwnerRefs(obj.GetOwnerReferences()))
+ return obj
+}
+
+func cleanRemovalKeys(required map[string]string) map[string]string {
+ for k := range required {
+ if strings.HasSuffix(k, "-") {
+ delete(required, k)
+ }
+ }
+ return required
+}
+
+func stringPtr(val string) *string {
+ return &val
+}
+
+func SetString(modified *bool, existing *string, required string) {
+ if required != *existing {
+ *existing = required
+ *modified = true
+ }
+}
+
+func SetStringIfSet(modified *bool, existing *string, required string) {
+ if len(required) == 0 {
+ return
+ }
+ if required != *existing {
+ *existing = required
+ *modified = true
+ }
+}
+
+func setStringPtr(modified *bool, existing **string, required *string) {
+ if *existing == nil || (required == nil && *existing != nil) {
+ *modified = true
+ *existing = required
+ return
+ }
+ SetString(modified, *existing, *required)
+}
+
+func SetStringSlice(modified *bool, existing *[]string, required []string) {
+ if !reflect.DeepEqual(required, *existing) {
+ *existing = required
+ *modified = true
+ }
+}
+
+func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) {
+ if required == nil {
+ return
+ }
+ if !reflect.DeepEqual(required, *existing) {
+ *existing = required
+ *modified = true
+ }
+}
+
+func BoolPtr(val bool) *bool {
+ return &val
+}
+
+func SetBool(modified *bool, existing *bool, required bool) {
+ if required != *existing {
+ *existing = required
+ *modified = true
+ }
+}
+
+func setBoolPtr(modified *bool, existing **bool, required *bool) {
+ if *existing == nil || (required == nil && *existing != nil) {
+ *modified = true
+ *existing = required
+ return
+ }
+ SetBool(modified, *existing, *required)
+}
+
+func int64Ptr(val int64) *int64 {
+ return &val
+}
+
+func SetInt32(modified *bool, existing *int32, required int32) {
+ if required != *existing {
+ *existing = required
+ *modified = true
+ }
+}
+
+func SetInt32IfSet(modified *bool, existing *int32, required int32) {
+ if required == 0 {
+ return
+ }
+
+ SetInt32(modified, existing, required)
+}
+
+func SetInt64(modified *bool, existing *int64, required int64) {
+ if required != *existing {
+ *existing = required
+ *modified = true
+ }
+}
+
+func setInt64Ptr(modified *bool, existing **int64, required *int64) {
+ if *existing == nil || (required == nil && *existing != nil) {
+ *modified = true
+ *existing = required
+ return
+ }
+ SetInt64(modified, *existing, *required)
+}
+
+func MergeMap(modified *bool, existing *map[string]string, required map[string]string) {
+ if *existing == nil {
+ *existing = map[string]string{}
+ }
+ for k, v := range required {
+ actualKey := k
+ removeKey := false
+
+ // if "required" map contains a key with "-" as suffix, remove that
+ // key from the existing map instead of replacing the value
+ if strings.HasSuffix(k, "-") {
+ removeKey = true
+ actualKey = strings.TrimRight(k, "-")
+ }
+
+ if existingV, ok := (*existing)[actualKey]; removeKey {
+ if !ok {
+ continue
+ }
+ // value found -> it should be removed
+ delete(*existing, actualKey)
+ *modified = true
+
+ } else if !ok || v != existingV {
+ *modified = true
+ (*existing)[actualKey] = v
+ }
+ }
+}
+
+func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) {
+ if *existing == nil {
+ *existing = map[string]string{}
+ }
+
+ if !reflect.DeepEqual(*existing, required) {
+ *existing = required
+ }
+}
+
+func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) {
+ if required == nil {
+ return
+ }
+ if *existing == nil {
+ *existing = map[string]string{}
+ }
+
+ if !reflect.DeepEqual(*existing, required) {
+ *existing = required
+ }
+}
+
+func MergeOwnerRefs(modified *bool, existing *[]metav1.OwnerReference, required []metav1.OwnerReference) {
+ if *existing == nil {
+ *existing = []metav1.OwnerReference{}
+ }
+
+ for _, o := range required {
+ removeOwner := false
+
+ // if "required" ownerRefs contain an owner.UID with "-" as suffix, remove that
+ // ownerRef from the existing ownerRefs instead of replacing the value
+ // NOTE: this is the same format as kubectl annotate and kubectl label
+ if strings.HasSuffix(string(o.UID), "-") {
+ removeOwner = true
+ }
+
+ existedIndex := 0
+
+ for existedIndex < len(*existing) {
+ if ownerRefMatched(o, (*existing)[existedIndex]) {
+ break
+ }
+ existedIndex++
+ }
+
+ if existedIndex == len(*existing) {
+ // There is no matched ownerref found, append the ownerref
+ // if it is not to be removed.
+ if !removeOwner {
+ *existing = append(*existing, o)
+ *modified = true
+ }
+ continue
+ }
+
+ if removeOwner {
+ *existing = append((*existing)[:existedIndex], (*existing)[existedIndex+1:]...)
+ *modified = true
+ continue
+ }
+
+ if !reflect.DeepEqual(o, (*existing)[existedIndex]) {
+ (*existing)[existedIndex] = o
+ *modified = true
+ }
+ }
+}
+
+func ownerRefMatched(existing, required metav1.OwnerReference) bool {
+ if existing.Name != required.Name {
+ return false
+ }
+
+ if existing.Kind != required.Kind {
+ return false
+ }
+
+ existingGV, err := schema.ParseGroupVersion(existing.APIVersion)
+
+ if err != nil {
+ return false
+ }
+
+ requiredGV, err := schema.ParseGroupVersion(required.APIVersion)
+
+ if err != nil {
+ return false
+ }
+
+ if existingGV.Group != requiredGV.Group {
+ return false
+ }
+
+ return true
+}
+
+func cleanRemovalOwnerRefs(required []metav1.OwnerReference) []metav1.OwnerReference {
+ for k := 0; k < len(required); k++ {
+ if strings.HasSuffix(string(required[k].UID), "-") {
+ required = append(required[:k], required[k+1:]...)
+ k--
+ }
+ }
+ return required
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go
new file mode 100644
index 0000000000..7c69478ea6
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go
@@ -0,0 +1,35 @@
+package resourceread
+
+import (
+ admissionv1 "k8s.io/api/admissionregistration/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ admissionScheme = runtime.NewScheme()
+ admissionCodecs = serializer.NewCodecFactory(admissionScheme)
+)
+
+func init() {
+ utilruntime.Must(admissionv1.AddToScheme(admissionScheme))
+}
+
+func ReadValidatingWebhookConfigurationV1OrDie(objBytes []byte) *admissionv1.ValidatingWebhookConfiguration {
+ requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+
+ return requiredObj.(*admissionv1.ValidatingWebhookConfiguration)
+}
+
+func ReadMutatingWebhookConfigurationV1OrDie(objBytes []byte) *admissionv1.MutatingWebhookConfiguration {
+ requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+
+ return requiredObj.(*admissionv1.MutatingWebhookConfiguration)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go
new file mode 100644
index 0000000000..e21f774e1e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go
@@ -0,0 +1,35 @@
+package resourceread
+
+import (
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ apiExtensionsScheme = runtime.NewScheme()
+ apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme)
+)
+
+func init() {
+ utilruntime.Must(apiextensionsv1beta1.AddToScheme(apiExtensionsScheme))
+ utilruntime.Must(apiextensionsv1.AddToScheme(apiExtensionsScheme))
+}
+
+func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextensionsv1beta1.CustomResourceDefinition {
+ requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1beta1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*apiextensionsv1beta1.CustomResourceDefinition)
+}
+
+func ReadCustomResourceDefinitionV1OrDie(objBytes []byte) *apiextensionsv1.CustomResourceDefinition {
+ requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*apiextensionsv1.CustomResourceDefinition)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go
new file mode 100644
index 0000000000..8490017e1c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go
@@ -0,0 +1,34 @@
+package resourceread
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+var (
+ appsScheme = runtime.NewScheme()
+ appsCodecs = serializer.NewCodecFactory(appsScheme)
+)
+
+func init() {
+ if err := appsv1.AddToScheme(appsScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment {
+ requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*appsv1.Deployment)
+}
+
+func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet {
+ requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*appsv1.DaemonSet)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go
new file mode 100644
index 0000000000..daa27c7b50
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go
@@ -0,0 +1,78 @@
+package resourceread
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+var (
+ coreScheme = runtime.NewScheme()
+ coreCodecs = serializer.NewCodecFactory(coreScheme)
+)
+
+func init() {
+ if err := corev1.AddToScheme(coreScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*corev1.ConfigMap)
+}
+
+func ReadSecretV1OrDie(objBytes []byte) *corev1.Secret {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*corev1.Secret)
+}
+
+func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*corev1.Namespace)
+}
+
+func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*corev1.ServiceAccount)
+}
+
+func ReadServiceV1OrDie(objBytes []byte) *corev1.Service {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*corev1.Service)
+}
+
+func ReadPodV1OrDie(objBytes []byte) *corev1.Pod {
+ requiredObj, err := ReadPodV1(objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj
+}
+
+func ReadPodV1(objBytes []byte) (*corev1.Pod, error) {
+ requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ return nil, err
+ }
+ return requiredObj.(*corev1.Pod), nil
+}
+
+func WritePodV1OrDie(obj *corev1.Pod) string {
+ return runtime.EncodeOrDie(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), obj)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go
new file mode 100644
index 0000000000..b62fb2b641
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go
@@ -0,0 +1,57 @@
+package resourceread
+
+import (
+ "github.com/openshift/api"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+)
+
+var (
+ genericScheme = runtime.NewScheme()
+ genericCodecs = serializer.NewCodecFactory(genericScheme)
+ genericCodec = genericCodecs.UniversalDeserializer()
+)
+
+func init() {
+ utilruntime.Must(api.Install(genericScheme))
+ utilruntime.Must(api.InstallKube(genericScheme))
+ utilruntime.Must(apiextensionsv1beta1.AddToScheme(genericScheme))
+ utilruntime.Must(apiextensionsv1.AddToScheme(genericScheme))
+ utilruntime.Must(migrationv1alpha1.AddToScheme(genericScheme))
+ utilruntime.Must(admissionregistrationv1.AddToScheme(genericScheme))
+}
+
+// ReadGenericWithUnstructured parses given yaml file using known scheme (see genericScheme above).
+// If the object kind is not registered in the scheme, it returns Unstructured as the last resort.
+func ReadGenericWithUnstructured(objBytes []byte) (runtime.Object, error) {
+ // Try to get a typed object first
+ typedObj, _, decodeErr := genericCodec.Decode(objBytes, nil, nil)
+ if decodeErr == nil {
+ return typedObj, nil
+ }
+
+ // Try unstructured, hoping to recover from "no kind XXX is registered for version YYY"
+ unstructuredObj, _, err := scheme.Codecs.UniversalDecoder().Decode(objBytes, nil, &unstructured.Unstructured{})
+ if err != nil {
+ // Return the original error
+ return nil, decodeErr
+ }
+ return unstructuredObj, nil
+}
+
+// ReadGenericWithUnstructuredOrDie parses given yaml file using known scheme (see genericScheme above).
+// If the object kind is not registered in the scheme, it returns Unstructured as the last resort.
+func ReadGenericWithUnstructuredOrDie(objBytes []byte) runtime.Object {
+ obj, err := ReadGenericWithUnstructured(objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return obj
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go
new file mode 100644
index 0000000000..62a80d1284
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go
@@ -0,0 +1,26 @@
+package resourceread
+
+import (
+ imagev1 "github.com/openshift/api/image/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+var (
+ imagesScheme = runtime.NewScheme()
+ imagesCodecs = serializer.NewCodecFactory(imagesScheme)
+)
+
+func init() {
+ if err := imagev1.AddToScheme(imagesScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadImageStreamV1OrDie(objBytes []byte) *imagev1.ImageStream {
+ requiredObj, err := runtime.Decode(imagesCodecs.UniversalDecoder(imagev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*imagev1.ImageStream)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go
new file mode 100644
index 0000000000..71b6074c92
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go
@@ -0,0 +1,26 @@
+package resourceread
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+)
+
+var (
+ migrationScheme = runtime.NewScheme()
+ migrationCodecs = serializer.NewCodecFactory(migrationScheme)
+)
+
+func init() {
+ if err := migrationv1alpha1.AddToScheme(migrationScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadStorageVersionMigrationV1Alpha1OrDie(objBytes []byte) *migrationv1alpha1.StorageVersionMigration {
+ requiredObj, err := runtime.Decode(migrationCodecs.UniversalDecoder(migrationv1alpha1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*migrationv1alpha1.StorageVersionMigration)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go
new file mode 100644
index 0000000000..fe058fdc6e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go
@@ -0,0 +1,25 @@
+package resourceread
+
+import (
+ policyv1 "k8s.io/api/policy/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ policyScheme = runtime.NewScheme()
+ policyCodecs = serializer.NewCodecFactory(policyScheme)
+)
+
+func init() {
+ utilruntime.Must(policyv1.AddToScheme(policyScheme))
+}
+
+func ReadPodDisruptionBudgetV1OrDie(objBytes []byte) *policyv1.PodDisruptionBudget {
+ requiredObj, err := runtime.Decode(policyCodecs.UniversalDecoder(policyv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*policyv1.PodDisruptionBudget)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go
new file mode 100644
index 0000000000..bf14899d88
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go
@@ -0,0 +1,50 @@
+package resourceread
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+var (
+ rbacScheme = runtime.NewScheme()
+ rbacCodecs = serializer.NewCodecFactory(rbacScheme)
+)
+
+func init() {
+ if err := rbacv1.AddToScheme(rbacScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding {
+ requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*rbacv1.ClusterRoleBinding)
+}
+
+func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole {
+ requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*rbacv1.ClusterRole)
+}
+
+func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding {
+ requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*rbacv1.RoleBinding)
+}
+
+func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role {
+ requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*rbacv1.Role)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go
new file mode 100644
index 0000000000..08e125892b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go
@@ -0,0 +1,26 @@
+package resourceread
+
+import (
+ routev1 "github.com/openshift/api/route/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+var (
+ routeScheme = runtime.NewScheme()
+ routeCodecs = serializer.NewCodecFactory(routeScheme)
+)
+
+func init() {
+ if err := routev1.AddToScheme(routeScheme); err != nil {
+ panic(err)
+ }
+}
+
+func ReadRouteV1OrDie(objBytes []byte) *routev1.Route {
+ requiredObj, err := runtime.Decode(routeCodecs.UniversalDecoder(routev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*routev1.Route)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go
new file mode 100644
index 0000000000..6a7d51ee7b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go
@@ -0,0 +1,43 @@
+package resourceread
+
+import (
+ storagev1 "k8s.io/api/storage/v1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var (
+ storageScheme = runtime.NewScheme()
+ storageCodecs = serializer.NewCodecFactory(storageScheme)
+)
+
+func init() {
+ utilruntime.Must(storagev1.AddToScheme(storageScheme))
+ utilruntime.Must(storagev1beta1.AddToScheme(storageScheme))
+}
+
+func ReadStorageClassV1OrDie(objBytes []byte) *storagev1.StorageClass {
+ requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*storagev1.StorageClass)
+}
+
+func ReadCSIDriverV1Beta1OrDie(objBytes []byte) *storagev1beta1.CSIDriver {
+ requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1beta1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*storagev1beta1.CSIDriver)
+}
+
+func ReadCSIDriverV1OrDie(objBytes []byte) *storagev1.CSIDriver {
+ requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes)
+ if err != nil {
+ panic(err)
+ }
+ return requiredObj.(*storagev1.CSIDriver)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go
new file mode 100644
index 0000000000..bf6bfb0105
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go
@@ -0,0 +1,18 @@
+package resourceread
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+func ReadCredentialRequestsOrDie(objBytes []byte) *unstructured.Unstructured {
+ return ReadUnstructuredOrDie(objBytes)
+}
+
+func ReadUnstructuredOrDie(objBytes []byte) *unstructured.Unstructured {
+ udi, _, err := scheme.Codecs.UniversalDecoder().Decode(objBytes, nil, &unstructured.Unstructured{})
+ if err != nil {
+ panic(err)
+ }
+ return udi.(*unstructured.Unstructured)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go
new file mode 100644
index 0000000000..f5a26338b7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go
@@ -0,0 +1,67 @@
+package resourcesynccontroller
+
+import (
+ "crypto/x509"
+ "fmt"
+ "reflect"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+ "k8s.io/client-go/util/cert"
+
+ "github.com/openshift/library-go/pkg/crypto"
+)
+
+func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) {
+ certificates := []*x509.Certificate{}
+ for _, input := range inputConfigMaps {
+ inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name)
+ if apierrors.IsNotFound(err) {
+ continue
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // configmaps must conform to this
+ inputContent := inputConfigMap.Data["ca-bundle.crt"]
+ if len(inputContent) == 0 {
+ continue
+ }
+ inputCerts, err := cert.ParseCertsPEM([]byte(inputContent))
+ if err != nil {
+ return nil, fmt.Errorf("configmap/%s in %q is malformed: %v", input.Name, input.Namespace, err)
+ }
+ certificates = append(certificates, inputCerts...)
+ }
+
+ certificates = crypto.FilterExpiredCerts(certificates...)
+ finalCertificates := []*x509.Certificate{}
+ // now check for duplicates. n^2, but super simple
+ for i := range certificates {
+ found := false
+ for j := range finalCertificates {
+ if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ finalCertificates = append(finalCertificates, certificates[i])
+ }
+ }
+
+ caBytes, err := crypto.EncodeCertificates(finalCertificates...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Namespace: destinationConfigMap.Namespace, Name: destinationConfigMap.Name},
+ Data: map[string]string{
+ "ca-bundle.crt": string(caBytes),
+ },
+ }, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go
new file mode 100644
index 0000000000..c53af8bdf4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go
@@ -0,0 +1,41 @@
+package resourcesynccontroller
+
+import "k8s.io/apimachinery/pkg/util/sets"
+
+// ResourceLocation describes coordinates for a resource to be synced
+type ResourceLocation struct {
+ Namespace string `json:"namespace"`
+ Name string `json:"name"`
+
+ // Provider, if set for the source location, enhances the error message to point to the component
+ // which provides this resource.
+ Provider string `json:"provider,omitempty"`
+}
+
+// PreconditionsFulfilled is a function that indicates whether all prerequisites
+// are met and a resource can be synced.
+type preconditionsFulfilled func() (bool, error)
+
+func alwaysFulfilledPreconditions() (bool, error) { return true, nil }
+
+type syncRuleSource struct {
+ ResourceLocation
+ syncedKeys sets.String // defines the set of keys to sync from source to dest
+ preconditionsFulfilledFn preconditionsFulfilled // preconditions to fulfill before syncing the resource
+}
+
+type syncRules map[ResourceLocation]syncRuleSource
+
+var (
+ emptyResourceLocation = ResourceLocation{}
+)
+
+// ResourceSyncer allows changes to syncing rules by this controller
+type ResourceSyncer interface {
+ // SyncConfigMap indicates that a configmap should be copied from the source to the destination. It will also
+ // mirror a deletion from the source. If the source is a zero object the destination will be deleted.
+ SyncConfigMap(destination, source ResourceLocation) error
+ // SyncSecret indicates that a secret should be copied from the source to the destination. It will also
+ // mirror a deletion from the source. If the source is a zero object the destination will be deleted.
+ SyncSecret(destination, source ResourceLocation) error
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go
new file mode 100644
index 0000000000..02cdedb174
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go
@@ -0,0 +1,340 @@
+package resourcesynccontroller
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+
+ "github.com/openshift/library-go/pkg/controller/factory"
+ "github.com/openshift/library-go/pkg/operator/condition"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/management"
+ "github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+ "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations.
+// It will also mirror deletions by deleting destinations.
+type ResourceSyncController struct {
+ name string
+ // syncRuleLock is used to ensure we avoid races on changes to syncing rules
+ syncRuleLock sync.RWMutex
+ // configMapSyncRules is a map from destination location to source location
+ configMapSyncRules syncRules
+ // secretSyncRules is a map from destination location to source location
+ secretSyncRules syncRules
+
+ // knownNamespaces is the list of namespaces we are watching.
+ knownNamespaces sets.String
+
+ configMapGetter corev1client.ConfigMapsGetter
+ secretGetter corev1client.SecretsGetter
+ kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces
+ operatorConfigClient v1helpers.OperatorClient
+
+ runFn func(ctx context.Context, workers int)
+ syncCtx factory.SyncContext
+}
+
+var _ ResourceSyncer = &ResourceSyncController{}
+var _ factory.Controller = &ResourceSyncController{}
+
+// NewResourceSyncController creates ResourceSyncController.
+func NewResourceSyncController(
+ operatorConfigClient v1helpers.OperatorClient,
+ kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces,
+ secretsGetter corev1client.SecretsGetter,
+ configMapsGetter corev1client.ConfigMapsGetter,
+ eventRecorder events.Recorder,
+) *ResourceSyncController {
+ c := &ResourceSyncController{
+ name: "ResourceSyncController",
+ operatorConfigClient: operatorConfigClient,
+
+ configMapSyncRules: syncRules{},
+ secretSyncRules: syncRules{},
+ kubeInformersForNamespaces: kubeInformersForNamespaces,
+ knownNamespaces: kubeInformersForNamespaces.Namespaces(),
+
+ configMapGetter: v1helpers.CachedConfigMapGetter(configMapsGetter, kubeInformersForNamespaces),
+ secretGetter: v1helpers.CachedSecretGetter(secretsGetter, kubeInformersForNamespaces),
+ syncCtx: factory.NewSyncContext("ResourceSyncController", eventRecorder.WithComponentSuffix("resource-sync-controller")),
+ }
+
+ informers := []factory.Informer{
+ operatorConfigClient.Informer(),
+ }
+ for namespace := range kubeInformersForNamespaces.Namespaces() {
+ if len(namespace) == 0 {
+ continue
+ }
+ informer := kubeInformersForNamespaces.InformersFor(namespace)
+ informers = append(informers, informer.Core().V1().ConfigMaps().Informer())
+ informers = append(informers, informer.Core().V1().Secrets().Informer())
+ }
+
+ f := factory.New().WithSync(c.Sync).WithSyncContext(c.syncCtx).WithInformers(informers...).ResyncEvery(time.Minute).ToController(c.name, eventRecorder.WithComponentSuffix("resource-sync-controller"))
+ c.runFn = f.Run
+
+ return c
+}
+
+func (c *ResourceSyncController) Run(ctx context.Context, workers int) {
+ c.runFn(ctx, workers)
+}
+
+func (c *ResourceSyncController) Name() string {
+ return c.name
+}
+
+func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error {
+ return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions)
+}
+
+func (c *ResourceSyncController) SyncPartialConfigMap(destination ResourceLocation, source ResourceLocation, keys ...string) error {
+ return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions, keys...)
+}
+
+// SyncConfigMapConditionally adds a new configmap that the resource sync
+// controller will synchronise if the given precondition is fulfilled.
+func (c *ResourceSyncController) SyncConfigMapConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error {
+ return c.syncConfigMap(destination, source, preconditionsFulfilledFn)
+}
+
+func (c *ResourceSyncController) syncConfigMap(destination ResourceLocation, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error {
+ if !c.knownNamespaces.Has(destination.Namespace) {
+ return fmt.Errorf("not watching namespace %q", destination.Namespace)
+ }
+ if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) {
+ return fmt.Errorf("not watching namespace %q", source.Namespace)
+ }
+
+ c.syncRuleLock.Lock()
+ defer c.syncRuleLock.Unlock()
+ c.configMapSyncRules[destination] = syncRuleSource{
+ ResourceLocation: source,
+ syncedKeys: sets.NewString(keys...),
+ preconditionsFulfilledFn: preconditionsFulfilledFn,
+ }
+
+ // make sure the new rule is picked up
+ c.syncCtx.Queue().Add(c.syncCtx.QueueKey())
+ return nil
+}
+
+func (c *ResourceSyncController) SyncSecret(destination, source ResourceLocation) error {
+ return c.syncSecret(destination, source, alwaysFulfilledPreconditions)
+}
+
+func (c *ResourceSyncController) SyncPartialSecret(destination, source ResourceLocation, keys ...string) error {
+ return c.syncSecret(destination, source, alwaysFulfilledPreconditions, keys...)
+}
+
+// SyncSecretConditionally adds a new secret that the resource sync controller
+// will synchronise if the given precondition is fulfilled.
+func (c *ResourceSyncController) SyncSecretConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error {
+ return c.syncSecret(destination, source, preconditionsFulfilledFn)
+}
+
+func (c *ResourceSyncController) syncSecret(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error {
+ if !c.knownNamespaces.Has(destination.Namespace) {
+ return fmt.Errorf("not watching namespace %q", destination.Namespace)
+ }
+ if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) {
+ return fmt.Errorf("not watching namespace %q", source.Namespace)
+ }
+
+ c.syncRuleLock.Lock()
+ defer c.syncRuleLock.Unlock()
+ c.secretSyncRules[destination] = syncRuleSource{
+ ResourceLocation: source,
+ syncedKeys: sets.NewString(keys...),
+ preconditionsFulfilledFn: preconditionsFulfilledFn,
+ }
+
+ // make sure the new rule is picked up
+ c.syncCtx.Queue().Add(c.syncCtx.QueueKey())
+ return nil
+}
+
+// errorWithProvider provides a finger of blame in case a source resource cannot be retrieved.
+func errorWithProvider(provider string, err error) error {
+ if len(provider) > 0 {
+ return fmt.Errorf("%w (check the %q that is supposed to provide this resource)", err, provider)
+ }
+ return err
+}
+
+func (c *ResourceSyncController) Sync(ctx context.Context, syncCtx factory.SyncContext) error {
+ operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState()
+ if err != nil {
+ return err
+ }
+
+ if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+ return nil
+ }
+
+ c.syncRuleLock.RLock()
+ defer c.syncRuleLock.RUnlock()
+
+ errors := []error{}
+
+ for destination, source := range c.configMapSyncRules {
+ // skip the sync if the preconditions aren't fulfilled
+ if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ continue
+ }
+
+ if source.ResourceLocation == emptyResourceLocation {
+ // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call.
+ if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil {
+ if !apierrors.IsNotFound(err) {
+ errors = append(errors, err)
+ }
+ continue
+ }
+ if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+ errors = append(errors, err)
+ }
+ continue
+ }
+
+ _, _, err := resourceapply.SyncPartialConfigMap(ctx, c.configMapGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{})
+ if err != nil {
+ errors = append(errors, errorWithProvider(source.Provider, err))
+ }
+ }
+ for destination, source := range c.secretSyncRules {
+ // skip the sync if the preconditions aren't fulfilled
+ if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ continue
+ }
+
+ if source.ResourceLocation == emptyResourceLocation {
+ // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call.
+ if _, err := c.secretGetter.Secrets(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil {
+ if !apierrors.IsNotFound(err) {
+ errors = append(errors, err)
+ }
+ continue
+ }
+ if err := c.secretGetter.Secrets(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+ errors = append(errors, err)
+ }
+ continue
+ }
+
+ _, _, err := resourceapply.SyncPartialSecret(ctx, c.secretGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{})
+ if err != nil {
+ errors = append(errors, errorWithProvider(source.Provider, err))
+ }
+ }
+
+ if len(errors) > 0 {
+ cond := operatorv1.OperatorCondition{
+ Type: condition.ResourceSyncControllerDegradedConditionType,
+ Status: operatorv1.ConditionTrue,
+ Reason: "Error",
+ Message: v1helpers.NewMultiLineAggregate(errors).Error(),
+ }
+ if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil {
+ return updateError
+ }
+ return nil
+ }
+
+ cond := operatorv1.OperatorCondition{
+ Type: condition.ResourceSyncControllerDegradedConditionType,
+ Status: operatorv1.ConditionFalse,
+ }
+ if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil {
+ return updateError
+ }
+ return nil
+}
+
+func NewDebugHandler(controller *ResourceSyncController) http.Handler {
+ return &debugHTTPHandler{controller: controller}
+}
+
+type debugHTTPHandler struct {
+ controller *ResourceSyncController
+}
+
+type ResourceSyncRule struct {
+ Destination ResourceLocation `json:"destination"`
+ Source syncRuleSource `json:"source"`
+}
+
+type ResourceSyncRuleList []ResourceSyncRule
+
+func (l ResourceSyncRuleList) Len() int { return len(l) }
+func (l ResourceSyncRuleList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l ResourceSyncRuleList) Less(i, j int) bool {
+ if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) < 0 {
+ return true
+ }
+ if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) > 0 {
+ return false
+ }
+ if strings.Compare(l[i].Source.Name, l[j].Source.Name) < 0 {
+ return true
+ }
+ return false
+}
+
+type ControllerSyncRules struct {
+ Secrets ResourceSyncRuleList `json:"secrets"`
+ Configs ResourceSyncRuleList `json:"configs"`
+}
+
+// ServeSyncRules provides a handler function to return the sync rules of the controller
+func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ syncRules := ControllerSyncRules{ResourceSyncRuleList{}, ResourceSyncRuleList{}}
+
+ h.controller.syncRuleLock.RLock()
+ defer h.controller.syncRuleLock.RUnlock()
+ syncRules.Secrets = append(syncRules.Secrets, resourceSyncRuleList(h.controller.secretSyncRules)...)
+ syncRules.Configs = append(syncRules.Configs, resourceSyncRuleList(h.controller.configMapSyncRules)...)
+
+ data, err := json.Marshal(syncRules)
+ if err != nil {
+ w.Write([]byte(err.Error()))
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.Write(data)
+ w.WriteHeader(http.StatusOK)
+}
+
+func resourceSyncRuleList(syncRules syncRules) ResourceSyncRuleList {
+ rules := make(ResourceSyncRuleList, 0, len(syncRules))
+ for dest, src := range syncRules {
+ rule := ResourceSyncRule{
+ Source: src,
+ Destination: dest,
+ }
+ rules = append(rules, rule)
+ }
+ sort.Sort(rules)
+ return rules
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
new file mode 100644
index 0000000000..e1a165e63f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
@@ -0,0 +1,61 @@
+package v1helpers
+
+import (
+ "fmt"
+ "sort"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// FlagsFromUnstructured processes the unstructured arguments usually retrieved from an operator's configuration file under a specific key.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
+//
+// Use ToFlagSlice function to get a slice of string flags.
+func FlagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) {
+ return flagsFromUnstructured(unstructuredArgs)
+}
+
+// ToFlagSlice transforms the provided arguments to a slice of string flags.
+// A flag name is taken directly from the key and the value is simply attached.
+// A flag is repeated iff it has more than one value.
+func ToFlagSlice(args map[string][]string) []string {
+ var keys []string
+ for key := range args {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ var flags []string
+ for _, key := range keys {
+ for _, token := range args[key] {
+ flags = append(flags, fmt.Sprintf("--%s=%s", key, token))
+ }
+ }
+ return flags
+}
+
+// flagsFromUnstructured processes the unstructured arguments (interface{}) to a map of strings.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
+func flagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) {
+ ret := map[string][]string{}
+ for argName, argRawValue := range unstructuredArgs {
+ var argsSlice []string
+ var found bool
+ var err error
+
+ argsSlice, found, err = unstructured.NestedStringSlice(unstructuredArgs, argName)
+ if !found || err != nil {
+ str, found, err := unstructured.NestedString(unstructuredArgs, argName)
+ if !found || err != nil {
+ return nil, fmt.Errorf("unable to process an argument, incorrect value %v under %v key, expected []string or string", argRawValue, argName)
+ }
+ argsSlice = append(argsSlice, str)
+ }
+
+ ret[argName] = argsSlice
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go
new file mode 100644
index 0000000000..bdfe17d92a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go
@@ -0,0 +1,127 @@
+package v1helpers
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+)
+
+var (
+ emptyGetOptions = metav1.GetOptions{}
+ emptyListOptions = metav1.ListOptions{}
+)
+
+type combinedConfigMapGetter struct {
+ client corev1client.ConfigMapsGetter
+ listers KubeInformersForNamespaces
+}
+
+func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter {
+ return &combinedConfigMapGetter{
+ client: client,
+ listers: listers,
+ }
+}
+
+type combinedConfigMapInterface struct {
+ corev1client.ConfigMapInterface
+ lister corev1listers.ConfigMapNamespaceLister
+ namespace string
+}
+
+func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface {
+ return combinedConfigMapInterface{
+ ConfigMapInterface: g.client.ConfigMaps(namespace),
+ lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace),
+ namespace: namespace,
+ }
+}
+
+func (g combinedConfigMapInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.ConfigMap, error) {
+ if !equality.Semantic.DeepEqual(options, emptyGetOptions) {
+ return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options)
+ }
+
+ ret, err := g.lister.Get(name)
+ if err != nil {
+ return nil, err
+ }
+ return ret.DeepCopy(), nil
+}
+func (g combinedConfigMapInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.ConfigMapList, error) {
+ if !equality.Semantic.DeepEqual(options, emptyListOptions) {
+ return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options)
+ }
+
+ list, err := g.lister.List(labels.Everything())
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &corev1.ConfigMapList{}
+ for i := range list {
+ ret.Items = append(ret.Items, *(list[i].DeepCopy()))
+ }
+ return ret, nil
+}
+
+type combinedSecretGetter struct {
+ client corev1client.SecretsGetter
+ listers KubeInformersForNamespaces
+}
+
+func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter {
+ return &combinedSecretGetter{
+ client: client,
+ listers: listers,
+ }
+}
+
+type combinedSecretInterface struct {
+ corev1client.SecretInterface
+ lister corev1listers.SecretNamespaceLister
+ namespace string
+}
+
+func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface {
+ return combinedSecretInterface{
+ SecretInterface: g.client.Secrets(namespace),
+ lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace),
+ namespace: namespace,
+ }
+}
+
+func (g combinedSecretInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.Secret, error) {
+ if !equality.Semantic.DeepEqual(options, emptyGetOptions) {
+ return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options)
+ }
+
+ ret, err := g.lister.Get(name)
+ if err != nil {
+ return nil, err
+ }
+ return ret.DeepCopy(), nil
+}
+
+func (g combinedSecretInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.SecretList, error) {
+ if !equality.Semantic.DeepEqual(options, emptyListOptions) {
+ return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options)
+ }
+
+ list, err := g.lister.List(labels.Everything())
+ if err != nil {
+ return nil, err
+ }
+
+ ret := &corev1.SecretList{}
+ for i := range list {
+ ret.Items = append(ret.Items, *(list[i].DeepCopy()))
+ }
+ return ret, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go
new file mode 100644
index 0000000000..8933328978
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go
@@ -0,0 +1,7 @@
+package v1helpers
+
+import "k8s.io/client-go/informers"
+
+func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces {
+ return kubeInformersForNamespaces(informers)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go
new file mode 100644
index 0000000000..f0f2958d23
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go
@@ -0,0 +1,485 @@
+package v1helpers
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/client-go/util/retry"
+
+ "github.com/ghodss/yaml"
+
+ configv1 "github.com/openshift/api/config/v1"
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// SetOperandVersion sets the new version and returns the previous value.
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
+ if versions == nil {
+ versions = &[]configv1.OperandVersion{}
+ }
+ existingVersion := FindOperandVersion(*versions, operandVersion.Name)
+ if existingVersion == nil {
+ *versions = append(*versions, operandVersion)
+ return ""
+ }
+
+ previous := existingVersion.Version
+ existingVersion.Version = operandVersion.Version
+ return previous
+}
+
+func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
+ if versions == nil {
+ return nil
+ }
+ for i := range versions {
+ if versions[i].Name == name {
+ return &versions[i]
+ }
+ }
+ return nil
+}
+
+func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
+ if conditions == nil {
+ conditions = &[]operatorv1.OperatorCondition{}
+ }
+ existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
+ if existingCondition == nil {
+ newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ *conditions = append(*conditions, newCondition)
+ return
+ }
+
+ if existingCondition.Status != newCondition.Status {
+ existingCondition.Status = newCondition.Status
+ existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ }
+
+ existingCondition.Reason = newCondition.Reason
+ existingCondition.Message = newCondition.Message
+}
+
+func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
+ if conditions == nil {
+ conditions = &[]operatorv1.OperatorCondition{}
+ }
+ newConditions := []operatorv1.OperatorCondition{}
+ for _, condition := range *conditions {
+ if condition.Type != conditionType {
+ newConditions = append(newConditions, condition)
+ }
+ }
+
+ *conditions = newConditions
+}
+
+func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
+ for i := range conditions {
+ if conditions[i].Type == conditionType {
+ return &conditions[i]
+ }
+ }
+
+ return nil
+}
+
+func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+ return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
+}
+
+func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+ return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
+}
+
+func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return condition.Status == status
+ }
+ }
+ return false
+}
+
+// UpdateOperatorSpecFunc is a func that mutates an operator spec.
+type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
+
+// UpdateSpec applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateSpec(ctx context.Context, client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+ updated := false
+ var operatorSpec *operatorv1.OperatorSpec
+ err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ oldSpec, _, resourceVersion, err := client.GetOperatorState()
+ if err != nil {
+ return err
+ }
+
+ newSpec := oldSpec.DeepCopy()
+ for _, update := range updateFuncs {
+ if err := update(newSpec); err != nil {
+ return err
+ }
+ }
+
+ if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+ return nil
+ }
+
+ operatorSpec, _, err = client.UpdateOperatorSpec(ctx, resourceVersion, newSpec)
+ updated = err == nil
+ return err
+ })
+
+ return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+ return func(oldSpec *operatorv1.OperatorSpec) error {
+ oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+ return nil
+ }
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(ctx context.Context, client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+ updated := false
+ var updatedOperatorStatus *operatorv1.OperatorStatus
+ err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ _, oldStatus, resourceVersion, err := client.GetOperatorState()
+ if err != nil {
+ return err
+ }
+
+ newStatus := oldStatus.DeepCopy()
+ for _, update := range updateFuncs {
+ if err := update(newStatus); err != nil {
+ return err
+ }
+ }
+
+ if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+ // We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+ updatedOperatorStatus = newStatus
+ return nil
+ }
+
+ updatedOperatorStatus, err = client.UpdateOperatorStatus(ctx, resourceVersion, newStatus)
+ updated = err == nil
+ return err
+ })
+
+ return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+ return func(oldStatus *operatorv1.OperatorStatus) error {
+ SetOperatorCondition(&oldStatus.Conditions, cond)
+ return nil
+ }
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(ctx context.Context, client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+ updated := false
+ var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+ err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ _, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+ if err != nil {
+ return err
+ }
+
+ newStatus := oldStatus.DeepCopy()
+ for _, update := range updateFuncs {
+ if err := update(newStatus); err != nil {
+ return err
+ }
+ }
+
+ if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+ // We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+ updatedOperatorStatus = newStatus
+ return nil
+ }
+
+ updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(ctx, resourceVersion, newStatus)
+ updated = err == nil
+ return err
+ })
+
+ return updatedOperatorStatus, updated, err
+}
+
+// UpdateStaticPodConditionFn returns a func to update a condition.
+func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc {
+ return func(oldStatus *operatorv1.StaticPodOperatorStatus) error {
+ SetOperatorCondition(&oldStatus.Conditions, cond)
+ return nil
+ }
+}
+
+// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0])
+// It re-tries on conflicts.
+func EnsureFinalizer(ctx context.Context, client OperatorClientWithFinalizers, controllerName string) error {
+ finalizer := getFinalizerName(controllerName)
+ err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ return client.EnsureFinalizer(ctx, finalizer)
+ })
+ return err
+}
+
+// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0])
+// It re-tries on conflicts.
+func RemoveFinalizer(ctx context.Context, client OperatorClientWithFinalizers, controllerName string) error {
+ finalizer := getFinalizerName(controllerName)
+ err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ return client.RemoveFinalizer(ctx, finalizer)
+ })
+ return err
+}
+
+// getFinalizerName computes a nice finalizer name from controllerName and the operator name ($OPERATOR_NAME or os.Args[0]).
+func getFinalizerName(controllerName string) string {
+ return fmt.Sprintf("%s.operator.openshift.io/%s", getOperatorName(), controllerName)
+}
+
+func getOperatorName() string {
+ if name := os.Getenv("OPERATOR_NAME"); name != "" {
+ return name
+ }
+ return os.Args[0]
+}
+
+type aggregate []error
+
+var _ utilerrors.Aggregate = aggregate{}
+
+// NewMultiLineAggregate returns an aggregate error with multi-line output
+func NewMultiLineAggregate(errList []error) error {
+ var errs []error
+ for _, e := range errList {
+ if e != nil {
+ errs = append(errs, e)
+ }
+ }
+ if len(errs) == 0 {
+ return nil
+ }
+ return aggregate(errs)
+}
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+ msgs := make([]string, len(agg))
+ for i := range agg {
+ msgs[i] = agg[i].Error()
+ }
+ return strings.Join(msgs, "\n")
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error {
+ return []error(agg)
+}
+
+// Is is part of the Aggregate interface
+func (agg aggregate) Is(target error) bool {
+ return agg.visit(func(err error) bool {
+ return errors.Is(err, target)
+ })
+}
+
+func (agg aggregate) visit(f func(err error) bool) bool {
+ for _, err := range agg {
+ switch err := err.(type) {
+ case aggregate:
+ if match := err.visit(f); match {
+ return match
+ }
+ case utilerrors.Aggregate:
+ for _, nestedErr := range err.Errors() {
+ if match := f(nestedErr); match {
+ return match
+ }
+ }
+ default:
+ if match := f(err); match {
+ return match
+ }
+ }
+ }
+
+ return false
+}
+
+// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s
+func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar {
+ if mapEnvVars == nil {
+ return nil
+ }
+
+ envVars := make([]corev1.EnvVar, len(mapEnvVars))
+ i := 0
+ for k, v := range mapEnvVars {
+ envVars[i] = corev1.EnvVar{Name: k, Value: v}
+ i++
+ }
+
+ // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time
+ sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name })
+ return envVars
+}
+
+// InjectObservedProxyIntoContainers injects proxy environment variables in containers specified in containerNames.
+func InjectObservedProxyIntoContainers(podSpec *corev1.PodSpec, containerNames []string, observedConfig []byte, fields ...string) error {
+ var config map[string]interface{}
+ if err := yaml.Unmarshal(observedConfig, &config); err != nil {
+ return fmt.Errorf("failed to unmarshal the observedConfig: %w", err)
+ }
+
+ proxyConfig, found, err := unstructured.NestedStringMap(config, fields...)
+ if err != nil {
+ return fmt.Errorf("couldn't get the proxy config from observedConfig: %w", err)
+ }
+
+ proxyEnvVars := MapToEnvVars(proxyConfig)
+ if !found || len(proxyEnvVars) < 1 {
+ // There's no observed proxy config, we should tolerate that
+ return nil
+ }
+
+ for _, containerName := range containerNames {
+ for i := range podSpec.InitContainers {
+ if podSpec.InitContainers[i].Name == containerName {
+ podSpec.InitContainers[i].Env = append(podSpec.InitContainers[i].Env, proxyEnvVars...)
+ }
+ }
+ for i := range podSpec.Containers {
+ if podSpec.Containers[i].Name == containerName {
+ podSpec.Containers[i].Env = append(podSpec.Containers[i].Env, proxyEnvVars...)
+ }
+ }
+ }
+
+ return nil
+}
+
+func InjectTrustedCAIntoContainers(podSpec *corev1.PodSpec, configMapName string, containerNames []string) error {
+ podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
+ Name: "non-standard-root-system-trust-ca-bundle",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: configMapName,
+ },
+ Items: []corev1.KeyToPath{
+ {Key: "ca-bundle.crt", Path: "tls-ca-bundle.pem"},
+ },
+ },
+ },
+ })
+
+ for _, containerName := range containerNames {
+ for i := range podSpec.InitContainers {
+ if podSpec.InitContainers[i].Name == containerName {
+ podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, corev1.VolumeMount{
+ Name: "non-standard-root-system-trust-ca-bundle",
+ MountPath: "/etc/pki/ca-trust/extracted/pem",
+ ReadOnly: true,
+ })
+ }
+ }
+ for i := range podSpec.Containers {
+ if podSpec.Containers[i].Name == containerName {
+ podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{
+ Name: "non-standard-root-system-trust-ca-bundle",
+ MountPath: "/etc/pki/ca-trust/extracted/pem",
+ ReadOnly: true,
+ })
+ }
+ }
+ }
+
+ return nil
+}
+
+func SetCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) {
+ if conditions == nil {
+ conditions = &[]metav1.Condition{}
+ }
+ existingCondition := FindCondition(*conditions, newCondition.Type)
+ if existingCondition == nil {
+ newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ *conditions = append(*conditions, newCondition)
+ return
+ }
+
+ if existingCondition.Status != newCondition.Status {
+ existingCondition.Status = newCondition.Status
+ existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+ }
+
+ existingCondition.Reason = newCondition.Reason
+ existingCondition.Message = newCondition.Message
+}
+
+func RemoveCondition(conditions *[]metav1.Condition, conditionType string) {
+ if conditions == nil {
+ conditions = &[]metav1.Condition{}
+ }
+ newConditions := []metav1.Condition{}
+ for _, condition := range *conditions {
+ if condition.Type != conditionType {
+ newConditions = append(newConditions, condition)
+ }
+ }
+
+ *conditions = newConditions
+}
+
+func FindCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition {
+ for i := range conditions {
+ if conditions[i].Type == conditionType {
+ return &conditions[i]
+ }
+ }
+
+ return nil
+}
+
+func IsConditionTrue(conditions []metav1.Condition, conditionType string) bool {
+ return IsConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue)
+}
+
+func IsConditionFalse(conditions []metav1.Condition, conditionType string) bool {
+ return IsConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse)
+}
+
+func IsConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool {
+ for _, condition := range conditions {
+ if condition.Type == conditionType {
+ return condition.Status == status
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go
new file mode 100644
index 0000000000..ba3769252d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go
@@ -0,0 +1,135 @@
+package v1helpers
+
+import (
+ "fmt"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/informers"
+ "k8s.io/client-go/kubernetes"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+)
+
+// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power
+type KubeInformersForNamespaces interface {
+ Start(stopCh <-chan struct{})
+ InformersFor(namespace string) informers.SharedInformerFactory
+ Namespaces() sets.String
+
+ ConfigMapLister() corev1listers.ConfigMapLister
+ SecretLister() corev1listers.SecretLister
+
+ // Used by the workloads controller and by controllers that report deployment pods status
+ PodLister() corev1listers.PodLister
+}
+
+var _ KubeInformersForNamespaces = kubeInformersForNamespaces{}
+
+func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces {
+ ret := kubeInformersForNamespaces{}
+ for _, namespace := range namespaces {
+ if len(namespace) == 0 {
+ ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute)
+ continue
+ }
+ ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace))
+ }
+
+ return ret
+}
+
+type kubeInformersForNamespaces map[string]informers.SharedInformerFactory
+
+func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) {
+ for _, informer := range i {
+ informer.Start(stopCh)
+ }
+}
+
+func (i kubeInformersForNamespaces) Namespaces() sets.String {
+ return sets.StringKeySet(i)
+}
+func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory {
+ return i[namespace]
+}
+
+func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool {
+ return i.InformersFor(namespace) != nil
+}
+
+type configMapLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister {
+ return configMapLister(i)
+}
+
+func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) {
+ globalInformer, ok := l[""]
+ if !ok {
+ return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+ }
+
+ return globalInformer.Core().V1().ConfigMaps().Lister().List(selector)
+}
+
+func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister {
+ informer, ok := l[namespace]
+ if !ok {
+ // coding error
+ panic(fmt.Sprintf("namespace %q is missing", namespace))
+ }
+
+ return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace)
+}
+
+type secretLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister {
+ return secretLister(i)
+}
+
+func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) {
+ globalInformer, ok := l[""]
+ if !ok {
+ return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+ }
+
+ return globalInformer.Core().V1().Secrets().Lister().List(selector)
+}
+
+func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister {
+ informer, ok := l[namespace]
+ if !ok {
+ // coding error
+ panic(fmt.Sprintf("namespace %q is missing", namespace))
+ }
+
+ return informer.Core().V1().Secrets().Lister().Secrets(namespace)
+}
+
+type podLister kubeInformersForNamespaces
+
+func (i kubeInformersForNamespaces) PodLister() corev1listers.PodLister {
+ return podLister(i)
+}
+
+func (l podLister) List(selector labels.Selector) (ret []*corev1.Pod, err error) {
+ globalInformer, ok := l[""]
+ if !ok {
+ return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+ }
+
+ return globalInformer.Core().V1().Pods().Lister().List(selector)
+}
+
+func (l podLister) Pods(namespace string) corev1listers.PodNamespaceLister {
+ informer, ok := l[namespace]
+ if !ok {
+ // coding error
+ panic(fmt.Sprintf("namespace %q is missing", namespace))
+ }
+
+ return informer.Core().V1().Pods().Lister().Pods(namespace)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
new file mode 100644
index 0000000000..d61d302946
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
@@ -0,0 +1,43 @@
+package v1helpers
+
+import (
+ "context"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+type OperatorClient interface {
+ Informer() cache.SharedIndexInformer
+ // GetObjectMeta return the operator metadata.
+ GetObjectMeta() (meta *metav1.ObjectMeta, err error)
+ // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister.
+ GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error)
+ // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version.
+ UpdateOperatorSpec(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error)
+ // UpdateOperatorStatus updates the status of the operator, assuming the given resource version.
+ UpdateOperatorStatus(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error)
+}
+
+type StaticPodOperatorClient interface {
+ OperatorClient
+ // GetStaticPodOperatorState returns the static pod operator spec, status and the resource version,
+ // potentially from a lister.
+ GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+ // GetStaticPodOperatorStateWithQuorum return the static pod operator spec, status and resource version
+ // directly from a server read.
+ GetStaticPodOperatorStateWithQuorum(ctx context.Context) (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+ // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version.
+ UpdateStaticPodOperatorStatus(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error)
+ // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version.
+ UpdateStaticPodOperatorSpec(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error)
+}
+
+type OperatorClientWithFinalizers interface {
+ OperatorClient
+ // EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+ EnsureFinalizer(ctx context.Context, finalizer string) error
+ // RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+ RemoveFinalizer(ctx context.Context, finalizer string) error
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
new file mode 100644
index 0000000000..004adc2be7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go
@@ -0,0 +1,302 @@
+package v1helpers
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+ "k8s.io/client-go/tools/cache"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests.
+func NewFakeSharedIndexInformer() cache.SharedIndexInformer {
+ return &fakeSharedIndexInformer{}
+}
+
+type fakeSharedIndexInformer struct{}
+
+func (i fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) {
+ return nil, nil
+}
+
+func (i fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) (cache.ResourceEventHandlerRegistration, error) {
+ return nil, nil
+}
+
+func (i fakeSharedIndexInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error {
+ panic("implement me")
+}
+
+func (i fakeSharedIndexInformer) IsStopped() bool {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetStore() cache.Store {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetController() cache.Controller {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) HasSynced() bool {
+ return true
+}
+
+func (fakeSharedIndexInformer) LastSyncResourceVersion() string {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) GetIndexer() cache.Indexer {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error {
+ panic("implement me")
+}
+
+func (fakeSharedIndexInformer) SetTransform(f cache.TransformFunc) error {
+ panic("implement me")
+}
+
+// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests.
+func NewFakeStaticPodOperatorClient(
+ staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus,
+ triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error,
+ triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient {
+ return &fakeStaticPodOperatorClient{
+ fakeStaticPodOperatorSpec: staticPodSpec,
+ fakeStaticPodOperatorStatus: staticPodStatus,
+ resourceVersion: "0",
+ triggerStatusUpdateError: triggerStatusErr,
+ triggerSpecUpdateError: triggerSpecErr,
+ }
+}
+
+type fakeStaticPodOperatorClient struct {
+ fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec
+ fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus
+ resourceVersion string
+ triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error
+ triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error
+}
+
+func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer {
+ return &fakeSharedIndexInformer{}
+
+}
+func (c *fakeStaticPodOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) {
+ panic("not supported")
+}
+
+func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) {
+ return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil
+}
+
+func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum(ctx context.Context) (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) {
+ return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil
+}
+
+func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) {
+ if c.resourceVersion != resourceVersion {
+ return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion"))
+ }
+ rv, err := strconv.Atoi(resourceVersion)
+ if err != nil {
+ return nil, err
+ }
+ c.resourceVersion = strconv.Itoa(rv + 1)
+ if c.triggerStatusUpdateError != nil {
+ if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil {
+ return nil, err
+ }
+ }
+ c.fakeStaticPodOperatorStatus = status
+ return c.fakeStaticPodOperatorStatus, nil
+}
+
+func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(ctx context.Context, resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) {
+ if c.resourceVersion != resourceVersion {
+ return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion"))
+ }
+ rv, err := strconv.Atoi(resourceVersion)
+ if err != nil {
+ return nil, "", err
+ }
+ c.resourceVersion = strconv.Itoa(rv + 1)
+ if c.triggerSpecUpdateError != nil {
+ if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil {
+ return nil, "", err
+ }
+ }
+ c.fakeStaticPodOperatorSpec = spec
+ return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil
+}
+
+func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) {
+ return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil
+}
+func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(ctx context.Context, s string, p *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) {
+ panic("not supported")
+}
+func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) {
+ if c.resourceVersion != resourceVersion {
+ return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion"))
+ }
+ rv, err := strconv.Atoi(resourceVersion)
+ if err != nil {
+ return nil, err
+ }
+ c.resourceVersion = strconv.Itoa(rv + 1)
+ if c.triggerStatusUpdateError != nil {
+ staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy()
+ staticPodStatus.OperatorStatus = *status
+ if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil {
+ return nil, err
+ }
+ }
+ c.fakeStaticPodOperatorStatus.OperatorStatus = *status
+ return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil
+}
+
+// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test
+func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister {
+ return &fakeNodeLister{client: client}
+}
+
+type fakeNodeLister struct {
+ client kubernetes.Interface
+}
+
+func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) {
+ nodes, err := n.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
+ if err != nil {
+ return nil, err
+ }
+ ret := []*corev1.Node{}
+ for i := range nodes.Items {
+ ret = append(ret, &nodes.Items[i])
+ }
+ return ret, nil
+}
+
+func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) {
+ panic("implement me")
+}
+
+// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests.
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers {
+ return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr)
+}
+
+func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers {
+ return &fakeOperatorClient{
+ fakeOperatorSpec: spec,
+ fakeOperatorStatus: status,
+ fakeObjectMeta: meta,
+ resourceVersion: "0",
+ triggerStatusUpdateError: triggerErr,
+ }
+}
+
+type fakeOperatorClient struct {
+ fakeOperatorSpec *operatorv1.OperatorSpec
+ fakeOperatorStatus *operatorv1.OperatorStatus
+ fakeObjectMeta *metav1.ObjectMeta
+ resourceVersion string
+ triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error
+}
+
+func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer {
+ return &fakeSharedIndexInformer{}
+}
+
+func (c *fakeOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) {
+ if c.fakeObjectMeta == nil {
+ return &metav1.ObjectMeta{}, nil
+ }
+
+ return c.fakeObjectMeta, nil
+}
+
+func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) {
+ return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil
+}
+
+func (c *fakeOperatorClient) UpdateOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) {
+ if c.resourceVersion != resourceVersion {
+ return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion"))
+ }
+ rv, err := strconv.Atoi(resourceVersion)
+ if err != nil {
+ return nil, err
+ }
+ c.resourceVersion = strconv.Itoa(rv + 1)
+ if c.triggerStatusUpdateError != nil {
+ if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil {
+ return nil, err
+ }
+ }
+ c.fakeOperatorStatus = status
+ return c.fakeOperatorStatus, nil
+}
+
+func (c *fakeOperatorClient) UpdateOperatorSpec(ctx context.Context, resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) {
+ if c.resourceVersion != resourceVersion {
+ return nil, c.resourceVersion, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion"))
+ }
+ rv, err := strconv.Atoi(resourceVersion)
+ if err != nil {
+ return nil, c.resourceVersion, err
+ }
+ c.resourceVersion = strconv.Itoa(rv + 1)
+ c.fakeOperatorSpec = spec
+ return c.fakeOperatorSpec, c.resourceVersion, nil
+}
+
+func (c *fakeOperatorClient) EnsureFinalizer(ctx context.Context, finalizer string) error {
+ if c.fakeObjectMeta == nil {
+ c.fakeObjectMeta = &metav1.ObjectMeta{}
+ }
+ for _, f := range c.fakeObjectMeta.Finalizers {
+ if f == finalizer {
+ return nil
+ }
+ }
+ c.fakeObjectMeta.Finalizers = append(c.fakeObjectMeta.Finalizers, finalizer)
+ return nil
+}
+
+func (c *fakeOperatorClient) RemoveFinalizer(ctx context.Context, finalizer string) error {
+ newFinalizers := []string{}
+ for _, f := range c.fakeObjectMeta.Finalizers {
+ if f == finalizer {
+ continue
+ }
+ newFinalizers = append(newFinalizers, f)
+ }
+ c.fakeObjectMeta.Finalizers = newFinalizers
+ return nil
+}
+
+func (c *fakeOperatorClient) SetObjectMeta(meta *metav1.ObjectMeta) {
+ c.fakeObjectMeta = meta
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go
new file mode 100644
index 0000000000..f4c92913a5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package collectors provides implementations of prometheus.Collector to
+// conveniently collect process and Go-related metrics.
+package collectors
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// NewBuildInfoCollector returns a collector collecting a single metric
+// "go_build_info" with the constant value 1 and three labels "path", "version",
+// and "checksum". Their label values contain the main module path, version, and
+// checksum, respectively. The labels will only have meaningful values if the
+// binary is built with Go module support and from source code retrieved from
+// the source repository (rather than the local file system). This is usually
+// accomplished by building from outside of GOPATH, specifying the full address
+// of the main package, e.g. "GO111MODULE=on go run
+// github.com/prometheus/client_golang/examples/random". If built without Go
+// module support, all label values will be "unknown". If built with Go module
+// support but using the source code from the local file system, the "path" will
+// be set appropriately, but "checksum" will be empty and "version" will be
+// "(devel)".
+//
+// This collector uses only the build information for the main module. See
+// https://github.com/povilasv/prommod for an example of a collector for the
+// module dependencies.
+func NewBuildInfoCollector() prometheus.Collector {
+ //nolint:staticcheck // Ignore SA1019 until v2.
+ return prometheus.NewBuildInfoCollector()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go
new file mode 100644
index 0000000000..d5a7279fb9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collectors
+
+import (
+ "database/sql"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type dbStatsCollector struct {
+ db *sql.DB
+
+ maxOpenConnections *prometheus.Desc
+
+ openConnections *prometheus.Desc
+ inUseConnections *prometheus.Desc
+ idleConnections *prometheus.Desc
+
+ waitCount *prometheus.Desc
+ waitDuration *prometheus.Desc
+ maxIdleClosed *prometheus.Desc
+ maxIdleTimeClosed *prometheus.Desc
+ maxLifetimeClosed *prometheus.Desc
+}
+
+// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB.
+// See https://golang.org/pkg/database/sql/#DBStats for more information on stats.
+func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector {
+ fqName := func(name string) string {
+ return "go_sql_" + name
+ }
+ return &dbStatsCollector{
+ db: db,
+ maxOpenConnections: prometheus.NewDesc(
+ fqName("max_open_connections"),
+ "Maximum number of open connections to the database.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ openConnections: prometheus.NewDesc(
+ fqName("open_connections"),
+ "The number of established connections both in use and idle.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ inUseConnections: prometheus.NewDesc(
+ fqName("in_use_connections"),
+ "The number of connections currently in use.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ idleConnections: prometheus.NewDesc(
+ fqName("idle_connections"),
+ "The number of idle connections.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ waitCount: prometheus.NewDesc(
+ fqName("wait_count_total"),
+ "The total number of connections waited for.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ waitDuration: prometheus.NewDesc(
+ fqName("wait_duration_seconds_total"),
+ "The total time blocked waiting for a new connection.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ maxIdleClosed: prometheus.NewDesc(
+ fqName("max_idle_closed_total"),
+ "The total number of connections closed due to SetMaxIdleConns.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ maxIdleTimeClosed: prometheus.NewDesc(
+ fqName("max_idle_time_closed_total"),
+ "The total number of connections closed due to SetConnMaxIdleTime.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ maxLifetimeClosed: prometheus.NewDesc(
+ fqName("max_lifetime_closed_total"),
+ "The total number of connections closed due to SetConnMaxLifetime.",
+ nil, prometheus.Labels{"db_name": dbName},
+ ),
+ }
+}
+
+// Describe implements Collector.
+func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- c.maxOpenConnections
+ ch <- c.openConnections
+ ch <- c.inUseConnections
+ ch <- c.idleConnections
+ ch <- c.waitCount
+ ch <- c.waitDuration
+ ch <- c.maxIdleClosed
+ ch <- c.maxLifetimeClosed
+ ch <- c.maxIdleTimeClosed
+}
+
+// Collect implements Collector.
+func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) {
+ stats := c.db.Stats()
+ ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections))
+ ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections))
+ ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse))
+ ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle))
+ ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount))
+ ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds())
+ ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed))
+ ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed))
+ ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed))
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
new file mode 100644
index 0000000000..b22d862fbc
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go
@@ -0,0 +1,57 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collectors
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// NewExpvarCollector returns a newly allocated expvar Collector.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector {
+ //nolint:staticcheck // Ignore SA1019 until v2.
+ return prometheus.NewExpvarCollector(exports)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go
new file mode 100644
index 0000000000..effc57840a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package collectors
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// NewGoCollector returns a collector that exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This requires to “stop the world”, which usually only happens for
+// garbage collection (GC). Take the following implications into account when
+// deciding whether to use the Go collector:
+//
+// 1. The performance impact of stopping the world is the more relevant the more
+// frequently metrics are collected. However, with Go1.9 or later the
+// stop-the-world time per metrics collection is very short (~25µs) so that the
+// performance impact will only matter in rare cases. However, with older Go
+// versions, the stop-the-world duration depends on the heap size and can be
+// quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
+//
+// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
+// metrics collection happens to coincide with GC, it will only complete after
+// GC has finished. Usually, GC is fast enough to not cause problems. However,
+// with a very large heap, GC might take multiple seconds, which is enough to
+// cause scrape timeouts in common setups. To avoid this problem, the Go
+// collector will use the memstats from a previous collection if
+// runtime.ReadMemStats takes more than 1s. However, if there are no previously
+// collected memstats, or their collection is more than 5m ago, the collection
+// will block until runtime.ReadMemStats succeeds.
+//
+// NOTE: The problem is solved in Go 1.15, see
+// https://github.com/golang/go/issues/19812 for the related Go issue.
+func NewGoCollector() prometheus.Collector {
+ return prometheus.NewGoCollector()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
new file mode 100644
index 0000000000..cc4ef1077e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
@@ -0,0 +1,167 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package collectors
+
+import (
+ "regexp"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/internal"
+)
+
+var (
+ // MetricsAll allows all the metrics to be collected from Go runtime.
+ MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")}
+ // MetricsGC allows only GC metrics to be collected from Go runtime.
+ // e.g. go_gc_cycles_automatic_gc_cycles_total
+ // NOTE: This does not include new class of "/cpu/classes/gc/..." metrics.
+ // Use custom metric rule to access those.
+ MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)}
+ // MetricsMemory allows only memory metrics to be collected from Go runtime.
+ // e.g. go_memory_classes_heap_free_bytes
+ MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)}
+ // MetricsScheduler allows only scheduler metrics to be collected from Go runtime.
+ // e.g. go_sched_goroutines_goroutines
+ MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)}
+ // MetricsDebug allows only debug metrics to be collected from Go runtime.
+ // e.g. go_godebug_non_default_behavior_gocachetest_events_total
+ MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)}
+)
+
+// WithGoCollectorMemStatsMetricsDisabled disables metrics that are gathered in the runtime.MemStats structure such as:
+//
+// go_memstats_alloc_bytes
+// go_memstats_alloc_bytes_total
+// go_memstats_sys_bytes
+// go_memstats_mallocs_total
+// go_memstats_frees_total
+// go_memstats_heap_alloc_bytes
+// go_memstats_heap_sys_bytes
+// go_memstats_heap_idle_bytes
+// go_memstats_heap_inuse_bytes
+// go_memstats_heap_released_bytes
+// go_memstats_heap_objects
+// go_memstats_stack_inuse_bytes
+// go_memstats_stack_sys_bytes
+// go_memstats_mspan_inuse_bytes
+// go_memstats_mspan_sys_bytes
+// go_memstats_mcache_inuse_bytes
+// go_memstats_mcache_sys_bytes
+// go_memstats_buck_hash_sys_bytes
+// go_memstats_gc_sys_bytes
+// go_memstats_other_sys_bytes
+// go_memstats_next_gc_bytes
+//
+// so the metrics known from pre client_golang v1.12.0,
+//
+// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are
+// actually implemented using new runtime/metrics package. (except skipped go_memstats_gc_cpu_fraction
+// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation).
+//
+// Some users might want to disable this on collector level (although you can use scrape relabelling on Prometheus),
+// because similar metrics can be now obtained using WithGoCollectorRuntimeMetrics. Note that the semantics of new
+// metrics might be different, plus the names can change over time with different Go versions.
+//
+// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted.
+// The old metrics are also very useful, with many guides and books written about how to interpret them.
+//
+// As a result our recommendation would be to stick with MemStats like metrics and enable other runtime/metrics if you are interested
+// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics.
+func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) {
+ return func(o *internal.GoCollectorOptions) {
+ o.DisableMemStatsLikeMetrics = true
+ }
+}
+
+// GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics.
+// TODO(bwplotka): Consider adding ability to adjust buckets.
+type GoRuntimeMetricsRule struct {
+ // Matcher represents an RE2 expression that will match the runtime/metrics from https://golang.org/src/runtime/metrics/description.go
+ // Use `regexp.MustCompile` or `regexp.Compile` to create this field.
+ Matcher *regexp.Regexp
+}
+
+// WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics.
+// See the list of metrics https://golang.org/src/runtime/metrics/description.go (pick the Go version you use there!).
+// You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule
+// that matches particular metrics is applied.
+func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) {
+ rs := make([]internal.GoCollectorRule, len(rules))
+ for i, r := range rules {
+ rs[i] = internal.GoCollectorRule{
+ Matcher: r.Matcher,
+ }
+ }
+
+ return func(o *internal.GoCollectorOptions) {
+ o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
+ }
+}
+
+// WithoutGoCollectorRuntimeMetrics allows disabling group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics.
+// It behaves similarly to WithGoCollectorRuntimeMetrics just with deny-list semantics.
+func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) {
+ rs := make([]internal.GoCollectorRule, len(matchers))
+ for i, m := range matchers {
+ rs[i] = internal.GoCollectorRule{
+ Matcher: m,
+ Deny: true,
+ }
+ }
+
+ return func(o *internal.GoCollectorOptions) {
+ o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
+ }
+}
+
+// GoCollectionOption represents Go collection option flag.
+// Deprecated.
+type GoCollectionOption uint32
+
+const (
+ // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure.
+ //
+ // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
+ GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
+ // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package.
+ //
+ // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
+ // function to enable those metrics in the collector.
+ GoRuntimeMetricsCollection
+)
+
+// WithGoCollections allows enabling different collections for Go collector on top of base metrics.
+//
+// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
+func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) {
+ return func(options *internal.GoCollectorOptions) {
+ if flags&GoRuntimeMemStatsCollection == 0 {
+ WithGoCollectorMemStatsMetricsDisabled()(options)
+ }
+
+ if flags&GoRuntimeMetricsCollection != 0 {
+ WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options)
+ }
+ }
+}
+
+// NewGoCollector returns a collector that exports metrics about the current Go
+// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones).
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector {
+ //nolint:staticcheck // Ignore SA1019 until v2.
+ return prometheus.NewGoCollector(opts...)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go
new file mode 100644
index 0000000000..24558f50a7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collectors
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
+func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector {
+ //nolint:staticcheck // Ignore SA1019 until v2.
+ return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
+ PidFn: opts.PidFn,
+ Namespace: opts.Namespace,
+ ReportErrors: opts.ReportErrors,
+ })
+}
diff --git a/vendor/github.com/robfig/cron/.gitignore b/vendor/github.com/robfig/cron/.gitignore
new file mode 100644
index 0000000000..00268614f0
--- /dev/null
+++ b/vendor/github.com/robfig/cron/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/robfig/cron/.travis.yml b/vendor/github.com/robfig/cron/.travis.yml
new file mode 100644
index 0000000000..4f2ee4d973
--- /dev/null
+++ b/vendor/github.com/robfig/cron/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/robfig/cron/LICENSE b/vendor/github.com/robfig/cron/LICENSE
new file mode 100644
index 0000000000..3a0f627ffe
--- /dev/null
+++ b/vendor/github.com/robfig/cron/LICENSE
@@ -0,0 +1,21 @@
+Copyright (C) 2012 Rob Figueiredo
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/robfig/cron/README.md b/vendor/github.com/robfig/cron/README.md
new file mode 100644
index 0000000000..ec40c95fcb
--- /dev/null
+++ b/vendor/github.com/robfig/cron/README.md
@@ -0,0 +1,6 @@
+[](http://godoc.org/github.com/robfig/cron)
+[](https://travis-ci.org/robfig/cron)
+
+# cron
+
+Documentation here: https://godoc.org/github.com/robfig/cron
diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go
new file mode 100644
index 0000000000..cd6e7b1be9
--- /dev/null
+++ b/vendor/github.com/robfig/cron/constantdelay.go
@@ -0,0 +1,27 @@
+package cron
+
+import "time"
+
+// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
+// It does not support jobs more frequent than once a second.
+type ConstantDelaySchedule struct {
+ Delay time.Duration
+}
+
+// Every returns a crontab Schedule that activates once every duration.
+// Delays of less than a second are not supported (will round up to 1 second).
+// Any fields less than a Second are truncated.
+func Every(duration time.Duration) ConstantDelaySchedule {
+ if duration < time.Second {
+ duration = time.Second
+ }
+ return ConstantDelaySchedule{
+ Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
+ }
+}
+
+// Next returns the next time this should be run.
+// This rounds so that the next activation time will be on the second.
+func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
+ return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
+}
diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go
new file mode 100644
index 0000000000..2318aeb2e7
--- /dev/null
+++ b/vendor/github.com/robfig/cron/cron.go
@@ -0,0 +1,259 @@
+package cron
+
+import (
+ "log"
+ "runtime"
+ "sort"
+ "time"
+)
+
+// Cron keeps track of any number of entries, invoking the associated func as
+// specified by the schedule. It may be started, stopped, and the entries may
+// be inspected while running.
+type Cron struct {
+ entries []*Entry
+ stop chan struct{}
+ add chan *Entry
+ snapshot chan []*Entry
+ running bool
+ ErrorLog *log.Logger
+ location *time.Location
+}
+
+// Job is an interface for submitted cron jobs.
+type Job interface {
+ Run()
+}
+
+// The Schedule describes a job's duty cycle.
+type Schedule interface {
+ // Return the next activation time, later than the given time.
+ // Next is invoked initially, and then each time the job is run.
+ Next(time.Time) time.Time
+}
+
+// Entry consists of a schedule and the func to execute on that schedule.
+type Entry struct {
+ // The schedule on which this job should be run.
+ Schedule Schedule
+
+ // The next time the job will run. This is the zero time if Cron has not been
+ // started or this entry's schedule is unsatisfiable
+ Next time.Time
+
+ // The last time this job was run. This is the zero time if the job has never
+ // been run.
+ Prev time.Time
+
+ // The Job to run.
+ Job Job
+}
+
+// byTime is a wrapper for sorting the entry array by time
+// (with zero time at the end).
+type byTime []*Entry
+
+func (s byTime) Len() int { return len(s) }
+func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byTime) Less(i, j int) bool {
+ // Two zero times should return false.
+ // Otherwise, zero is "greater" than any other time.
+ // (To sort it at the end of the list.)
+ if s[i].Next.IsZero() {
+ return false
+ }
+ if s[j].Next.IsZero() {
+ return true
+ }
+ return s[i].Next.Before(s[j].Next)
+}
+
+// New returns a new Cron job runner, in the Local time zone.
+func New() *Cron {
+ return NewWithLocation(time.Now().Location())
+}
+
+// NewWithLocation returns a new Cron job runner.
+func NewWithLocation(location *time.Location) *Cron {
+ return &Cron{
+ entries: nil,
+ add: make(chan *Entry),
+ stop: make(chan struct{}),
+ snapshot: make(chan []*Entry),
+ running: false,
+ ErrorLog: nil,
+ location: location,
+ }
+}
+
+// A wrapper that turns a func() into a cron.Job
+type FuncJob func()
+
+func (f FuncJob) Run() { f() }
+
+// AddFunc adds a func to the Cron to be run on the given schedule.
+func (c *Cron) AddFunc(spec string, cmd func()) error {
+ return c.AddJob(spec, FuncJob(cmd))
+}
+
+// AddJob adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) AddJob(spec string, cmd Job) error {
+ schedule, err := Parse(spec)
+ if err != nil {
+ return err
+ }
+ c.Schedule(schedule, cmd)
+ return nil
+}
+
+// Schedule adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) Schedule(schedule Schedule, cmd Job) {
+ entry := &Entry{
+ Schedule: schedule,
+ Job: cmd,
+ }
+ if !c.running {
+ c.entries = append(c.entries, entry)
+ return
+ }
+
+ c.add <- entry
+}
+
+// Entries returns a snapshot of the cron entries.
+func (c *Cron) Entries() []*Entry {
+ if c.running {
+ c.snapshot <- nil
+ x := <-c.snapshot
+ return x
+ }
+ return c.entrySnapshot()
+}
+
+// Location gets the time zone location
+func (c *Cron) Location() *time.Location {
+ return c.location
+}
+
+// Start the cron scheduler in its own go-routine, or no-op if already started.
+func (c *Cron) Start() {
+ if c.running {
+ return
+ }
+ c.running = true
+ go c.run()
+}
+
+// Run the cron scheduler, or no-op if already running.
+func (c *Cron) Run() {
+ if c.running {
+ return
+ }
+ c.running = true
+ c.run()
+}
+
+func (c *Cron) runWithRecovery(j Job) {
+ defer func() {
+ if r := recover(); r != nil {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ c.logf("cron: panic running job: %v\n%s", r, buf)
+ }
+ }()
+ j.Run()
+}
+
+// Run the scheduler. this is private just due to the need to synchronize
+// access to the 'running' state variable.
+func (c *Cron) run() {
+ // Figure out the next activation times for each entry.
+ now := c.now()
+ for _, entry := range c.entries {
+ entry.Next = entry.Schedule.Next(now)
+ }
+
+ for {
+ // Determine the next entry to run.
+ sort.Sort(byTime(c.entries))
+
+ var timer *time.Timer
+ if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
+ // If there are no entries yet, just sleep - it still handles new entries
+ // and stop requests.
+ timer = time.NewTimer(100000 * time.Hour)
+ } else {
+ timer = time.NewTimer(c.entries[0].Next.Sub(now))
+ }
+
+ for {
+ select {
+ case now = <-timer.C:
+ now = now.In(c.location)
+ // Run every entry whose next time was less than now
+ for _, e := range c.entries {
+ if e.Next.After(now) || e.Next.IsZero() {
+ break
+ }
+ go c.runWithRecovery(e.Job)
+ e.Prev = e.Next
+ e.Next = e.Schedule.Next(now)
+ }
+
+ case newEntry := <-c.add:
+ timer.Stop()
+ now = c.now()
+ newEntry.Next = newEntry.Schedule.Next(now)
+ c.entries = append(c.entries, newEntry)
+
+ case <-c.snapshot:
+ c.snapshot <- c.entrySnapshot()
+ continue
+
+ case <-c.stop:
+ timer.Stop()
+ return
+ }
+
+ break
+ }
+ }
+}
+
+// Logs an error to stderr or to the configured error log
+func (c *Cron) logf(format string, args ...interface{}) {
+ if c.ErrorLog != nil {
+ c.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// Stop stops the cron scheduler if it is running; otherwise it does nothing.
+func (c *Cron) Stop() {
+ if !c.running {
+ return
+ }
+ c.stop <- struct{}{}
+ c.running = false
+}
+
+// entrySnapshot returns a copy of the current cron entry list.
+func (c *Cron) entrySnapshot() []*Entry {
+ entries := []*Entry{}
+ for _, e := range c.entries {
+ entries = append(entries, &Entry{
+ Schedule: e.Schedule,
+ Next: e.Next,
+ Prev: e.Prev,
+ Job: e.Job,
+ })
+ }
+ return entries
+}
+
+// now returns current time in c location
+func (c *Cron) now() time.Time {
+ return time.Now().In(c.location)
+}
diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go
new file mode 100644
index 0000000000..d02ec2f3b5
--- /dev/null
+++ b/vendor/github.com/robfig/cron/doc.go
@@ -0,0 +1,129 @@
+/*
+Package cron implements a cron spec parser and job runner.
+
+Usage
+
+Callers may register Funcs to be invoked on a given schedule. Cron will run
+them in their own goroutines.
+
+ c := cron.New()
+ c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+ c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
+ c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
+ c.Start()
+ ..
+ // Funcs are invoked in their own goroutine, asynchronously.
+ ...
+ // Funcs may also be added to a running Cron
+ c.AddFunc("@daily", func() { fmt.Println("Every day") })
+ ..
+ // Inspect the cron job entries' next and previous run times.
+ inspect(c.Entries())
+ ..
+ c.Stop() // Stop the scheduler (does not stop any jobs already running).
+
+CRON Expression Format
+
+A cron expression represents a set of times, using 6 space-separated fields.
+
+ Field name | Mandatory? | Allowed values | Allowed special characters
+ ---------- | ---------- | -------------- | --------------------------
+ Seconds | Yes | 0-59 | * / , -
+ Minutes | Yes | 0-59 | * / , -
+ Hours | Yes | 0-23 | * / , -
+ Day of month | Yes | 1-31 | * / , - ?
+ Month | Yes | 1-12 or JAN-DEC | * / , -
+ Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
+
+Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
+and "sun" are equally accepted.
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+ Entry | Description | Equivalent To
+ ----- | ----------- | -------------
+ @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
+ @monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
+ @weekly | Run once a week, midnight between Sat/Sun | 0 0 0 * * 0
+ @daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
+ @hourly | Run once an hour, beginning of hour | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+ @every
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 0000000000..a5e83c0a8d
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
+package cron
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parse in.
+type ParseOption int
+
+const (
+ Second ParseOption = 1 << iota // Seconds field, default 0
+ Minute // Minutes field, default 0
+ Hour // Hours field, default 0
+ Dom // Day of month field, default *
+ Month // Month field, default *
+ Dow // Day of week field, default *
+ DowOptional // Optional day of week field, default *
+ Descriptor // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+ Second,
+ Minute,
+ Hour,
+ Dom,
+ Month,
+ Dow,
+}
+
+var defaults = []string{
+ "0",
+ "0",
+ "0",
+ "*",
+ "*",
+ "*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+ options ParseOption
+ optionals int
+}
+
+// Creates a custom Parser with custom options.
+//
+// // Standard parser without descriptors
+// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+// sched, err := specParser.Parse("0 0 15 */3 *")
+//
+// // Same as above, just excludes time fields
+// subsParser := NewParser(Dom | Month | Dow)
+// sched, err := specParser.Parse("15 */3 *")
+//
+// // Same as above, just makes Dow optional
+// subsParser := NewParser(Dom | Month | DowOptional)
+// sched, err := specParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+ optionals := 0
+ if options&DowOptional > 0 {
+ options |= Dow
+ optionals++
+ }
+ return Parser{options, optionals}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+ if len(spec) == 0 {
+ return nil, fmt.Errorf("Empty spec string")
+ }
+ if spec[0] == '@' && p.options&Descriptor > 0 {
+ return parseDescriptor(spec)
+ }
+
+ // Figure out how many fields we need
+ max := 0
+ for _, place := range places {
+ if p.options&place > 0 {
+ max++
+ }
+ }
+ min := max - p.optionals
+
+ // Split fields on whitespace
+ fields := strings.Fields(spec)
+
+ // Validate number of fields
+ if count := len(fields); count < min || count > max {
+ if min == max {
+ return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
+ }
+ return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
+ }
+
+ // Fill in missing fields
+ fields = expandFields(fields, p.options)
+
+ var err error
+ field := func(field string, r bounds) uint64 {
+ if err != nil {
+ return 0
+ }
+ var bits uint64
+ bits, err = getField(field, r)
+ return bits
+ }
+
+ var (
+ second = field(fields[0], seconds)
+ minute = field(fields[1], minutes)
+ hour = field(fields[2], hours)
+ dayofmonth = field(fields[3], dom)
+ month = field(fields[4], months)
+ dayofweek = field(fields[5], dow)
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &SpecSchedule{
+ Second: second,
+ Minute: minute,
+ Hour: hour,
+ Dom: dayofmonth,
+ Month: month,
+ Dow: dayofweek,
+ }, nil
+}
+
+func expandFields(fields []string, options ParseOption) []string {
+ n := 0
+ count := len(fields)
+ expFields := make([]string, len(places))
+ copy(expFields, defaults)
+ for i, place := range places {
+ if options&place > 0 {
+ expFields[i] = fields[n]
+ n++
+ }
+ if n == count {
+ break
+ }
+ }
+ return expFields
+}
+
+var standardParser = NewParser(
+ Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given standardSpec
+// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always
+// pass 5 entries representing: minute, hour, day of month, month and day of week,
+// in that order. It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+// - Standard crontab specs, e.g. "* * * * ?"
+// - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+ return standardParser.Parse(standardSpec)
+}
+
+var defaultParser = NewParser(
+ Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
+)
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+// - Full crontab specs, e.g. "* * * * * ?"
+// - Descriptors, e.g. "@midnight", "@every 1h30m"
+func Parse(spec string) (Schedule, error) {
+ return defaultParser.Parse(spec)
+}
+
+// getField returns an Int with the bits set representing all of the times that
+// the field represents or error parsing field value. A "field" is a comma-separated
+// list of "ranges".
+func getField(field string, r bounds) (uint64, error) {
+ var bits uint64
+ ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
+ for _, expr := range ranges {
+ bit, err := getRange(expr, r)
+ if err != nil {
+ return bits, err
+ }
+ bits |= bit
+ }
+ return bits, nil
+}
+
+// getRange returns the bits indicated by the given expression:
+// number | number "-" number [ "/" number ]
+// or error parsing range.
+func getRange(expr string, r bounds) (uint64, error) {
+ var (
+ start, end, step uint
+ rangeAndStep = strings.Split(expr, "/")
+ lowAndHigh = strings.Split(rangeAndStep[0], "-")
+ singleDigit = len(lowAndHigh) == 1
+ err error
+ )
+
+ var extra uint64
+ if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
+ start = r.min
+ end = r.max
+ extra = starBit
+ } else {
+ start, err = parseIntOrName(lowAndHigh[0], r.names)
+ if err != nil {
+ return 0, err
+ }
+ switch len(lowAndHigh) {
+ case 1:
+ end = start
+ case 2:
+ end, err = parseIntOrName(lowAndHigh[1], r.names)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("Too many hyphens: %s", expr)
+ }
+ }
+
+ switch len(rangeAndStep) {
+ case 1:
+ step = 1
+ case 2:
+ step, err = mustParseInt(rangeAndStep[1])
+ if err != nil {
+ return 0, err
+ }
+
+ // Special handling: "N/step" means "N-max/step".
+ if singleDigit {
+ end = r.max
+ }
+ default:
+ return 0, fmt.Errorf("Too many slashes: %s", expr)
+ }
+
+ if start < r.min {
+ return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
+ }
+ if end > r.max {
+ return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
+ }
+ if start > end {
+ return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
+ }
+ if step == 0 {
+ return 0, fmt.Errorf("Step of range should be a positive number: %s", expr)
+ }
+
+ return getBits(start, end, step) | extra, nil
+}
+
+// parseIntOrName returns the (possibly-named) integer contained in expr.
+func parseIntOrName(expr string, names map[string]uint) (uint, error) {
+ if names != nil {
+ if namedInt, ok := names[strings.ToLower(expr)]; ok {
+ return namedInt, nil
+ }
+ }
+ return mustParseInt(expr)
+}
+
+// mustParseInt parses the given expression as an int or returns an error.
+func mustParseInt(expr string) (uint, error) {
+ num, err := strconv.Atoi(expr)
+ if err != nil {
+ return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err)
+ }
+ if num < 0 {
+ return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr)
+ }
+
+ return uint(num), nil
+}
+
+// getBits sets all bits in the range [min, max], modulo the given step size.
+func getBits(min, max, step uint) uint64 {
+ var bits uint64
+
+ // If step is 1, use shifts.
+ if step == 1 {
+ return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
+ }
+
+ // Else, use a simple loop.
+ for i := min; i <= max; i += step {
+ bits |= 1 << i
+ }
+ return bits
+}
+
+// all returns all bits within the given bounds. (plus the star bit)
+func all(r bounds) uint64 {
+ return getBits(r.min, r.max, 1) | starBit
+}
+
+// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
+func parseDescriptor(descriptor string) (Schedule, error) {
+ switch descriptor {
+ case "@yearly", "@annually":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: 1 << months.min,
+ Dow: all(dow),
+ }, nil
+
+ case "@monthly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+
+ case "@weekly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: 1 << dow.min,
+ }, nil
+
+ case "@daily", "@midnight":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+
+ case "@hourly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: all(hours),
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+ }
+
+ const every = "@every "
+ if strings.HasPrefix(descriptor, every) {
+ duration, err := time.ParseDuration(descriptor[len(every):])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err)
+ }
+ return Every(duration), nil
+ }
+
+ return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor)
+}
diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go
new file mode 100644
index 0000000000..aac9a60b95
--- /dev/null
+++ b/vendor/github.com/robfig/cron/spec.go
@@ -0,0 +1,158 @@
+package cron
+
+import "time"
+
+// SpecSchedule specifies a duty cycle (to the second granularity), based on a
+// traditional crontab specification. It is computed initially and stored as bit sets.
+type SpecSchedule struct {
+ Second, Minute, Hour, Dom, Month, Dow uint64
+}
+
+// bounds provides a range of acceptable values (plus a map of name to value).
+type bounds struct {
+ min, max uint
+ names map[string]uint
+}
+
+// The bounds for each field.
+var (
+ seconds = bounds{0, 59, nil}
+ minutes = bounds{0, 59, nil}
+ hours = bounds{0, 23, nil}
+ dom = bounds{1, 31, nil}
+ months = bounds{1, 12, map[string]uint{
+ "jan": 1,
+ "feb": 2,
+ "mar": 3,
+ "apr": 4,
+ "may": 5,
+ "jun": 6,
+ "jul": 7,
+ "aug": 8,
+ "sep": 9,
+ "oct": 10,
+ "nov": 11,
+ "dec": 12,
+ }}
+ dow = bounds{0, 6, map[string]uint{
+ "sun": 0,
+ "mon": 1,
+ "tue": 2,
+ "wed": 3,
+ "thu": 4,
+ "fri": 5,
+ "sat": 6,
+ }}
+)
+
+const (
+ // Set the top bit if a star was included in the expression.
+ starBit = 1 << 63
+)
+
+// Next returns the next time this schedule is activated, greater than the given
+// time. If no time can be found to satisfy the schedule, return the zero time.
+func (s *SpecSchedule) Next(t time.Time) time.Time {
+ // General approach:
+ // For Month, Day, Hour, Minute, Second:
+ // Check if the time value matches. If yes, continue to the next field.
+ // If the field doesn't match the schedule, then increment the field until it matches.
+ // While incrementing the field, a wrap-around brings it back to the beginning
+ // of the field list (since it is necessary to re-verify previous field
+ // values)
+
+ // Start at the earliest possible time (the upcoming second).
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+ // This flag indicates whether a field has been incremented.
+ added := false
+
+ // If no time is found within five years, return zero.
+ yearLimit := t.Year() + 5
+
+WRAP:
+ if t.Year() > yearLimit {
+ return time.Time{}
+ }
+
+ // Find the first applicable month.
+ // If it's this month, then do nothing.
+ for 1< 0
+ dowMatch bool = 1< 0
+ )
+ if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+ return domMatch && dowMatch
+ }
+ return domMatch || dowMatch
+}
diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go
new file mode 100644
index 0000000000..a5669022a0
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=false
+// +k8s:prerelease-lifecycle-gen=true
+
+// +groupName=admission.k8s.io
+
+package v1beta1 // import "k8s.io/api/admission/v1beta1"
diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
new file mode 100644
index 0000000000..22147cbe94
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
@@ -0,0 +1,1782 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/admission/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} }
+func (*AdmissionRequest) ProtoMessage() {}
+func (*AdmissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d8f147b43c61e73e, []int{0}
+}
+func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AdmissionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AdmissionRequest.Merge(m, src)
+}
+func (m *AdmissionRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AdmissionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AdmissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo
+
+func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} }
+func (*AdmissionResponse) ProtoMessage() {}
+func (*AdmissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d8f147b43c61e73e, []int{1}
+}
+func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AdmissionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AdmissionResponse.Merge(m, src)
+}
+func (m *AdmissionResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AdmissionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AdmissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo
+
+func (m *AdmissionReview) Reset() { *m = AdmissionReview{} }
+func (*AdmissionReview) ProtoMessage() {}
+func (*AdmissionReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d8f147b43c61e73e, []int{2}
+}
+func (m *AdmissionReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AdmissionReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AdmissionReview.Merge(m, src)
+}
+func (m *AdmissionReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *AdmissionReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_AdmissionReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest")
+ proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse")
+ proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry")
+ proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_d8f147b43c61e73e)
+}
+
+var fileDescriptor_d8f147b43c61e73e = []byte{
+ // 911 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
+ 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xaa, 0xb5, 0xc9, 0x01,
+ 0x19, 0xa9, 0x9d, 0x25, 0x11, 0x54, 0x51, 0xc5, 0x25, 0x4b, 0x22, 0x14, 0x90, 0x9a, 0x68, 0x5a,
+ 0x43, 0xe1, 0x80, 0x34, 0xb6, 0xa7, 0xf6, 0x60, 0x7b, 0x66, 0xd9, 0x99, 0x4d, 0xf0, 0x8d, 0x3b,
+ 0x17, 0xbe, 0x01, 0x5f, 0x80, 0x6f, 0xc1, 0x25, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39,
+ 0xa1, 0x99, 0x9d, 0xf5, 0x3a, 0x4e, 0x52, 0xfa, 0xef, 0x94, 0x7d, 0x7f, 0x7e, 0xbf, 0xf7, 0xf2,
+ 0x7b, 0xfb, 0xde, 0x1a, 0xdc, 0x1f, 0xef, 0x4a, 0xc4, 0x44, 0x40, 0x22, 0x16, 0x90, 0xc1, 0x94,
+ 0x49, 0xc9, 0x04, 0x0f, 0x4e, 0xb6, 0x7b, 0x54, 0x91, 0xed, 0x60, 0x48, 0x39, 0x8d, 0x89, 0xa2,
+ 0x03, 0x14, 0xc5, 0x42, 0x09, 0x78, 0x2f, 0xcd, 0x46, 0x24, 0x62, 0x68, 0x99, 0x8d, 0x6c, 0x76,
+ 0xf3, 0xc1, 0x90, 0xa9, 0x51, 0xd2, 0x43, 0x7d, 0x31, 0x0d, 0x86, 0x62, 0x28, 0x02, 0x03, 0xea,
+ 0x25, 0xcf, 0x8d, 0x65, 0x0c, 0xf3, 0x94, 0x92, 0x35, 0x2f, 0x95, 0x4e, 0xd4, 0x88, 0x72, 0xc5,
+ 0xfa, 0x44, 0xa5, 0xf5, 0xd7, 0x4b, 0x37, 0x3f, 0xcf, 0xb3, 0xa7, 0xa4, 0x3f, 0x62, 0x9c, 0xc6,
+ 0xb3, 0x20, 0x1a, 0x0f, 0xb5, 0x43, 0x06, 0x53, 0xaa, 0xc8, 0x75, 0xa8, 0xe0, 0x26, 0x54, 0x9c,
+ 0x70, 0xc5, 0xa6, 0xf4, 0x0a, 0xe0, 0xe1, 0xff, 0x01, 0x64, 0x7f, 0x44, 0xa7, 0x64, 0x1d, 0xb7,
+ 0xf5, 0xa7, 0x0b, 0x1a, 0x7b, 0x99, 0x22, 0x98, 0xfe, 0x92, 0x50, 0xa9, 0x60, 0x08, 0x8a, 0x09,
+ 0x1b, 0x78, 0x4e, 0xdb, 0xe9, 0xb8, 0xe1, 0x67, 0x67, 0xf3, 0x56, 0x61, 0x31, 0x6f, 0x15, 0xbb,
+ 0x87, 0xfb, 0x17, 0xf3, 0xd6, 0xc7, 0x37, 0x15, 0x52, 0xb3, 0x88, 0x4a, 0xd4, 0x3d, 0xdc, 0xc7,
+ 0x1a, 0x0c, 0x9f, 0x81, 0xd2, 0x98, 0xf1, 0x81, 0x77, 0xab, 0xed, 0x74, 0x6a, 0x3b, 0x0f, 0x51,
+ 0x3e, 0x81, 0x25, 0x0c, 0x45, 0xe3, 0xa1, 0x76, 0x48, 0xa4, 0x65, 0x40, 0x27, 0xdb, 0xe8, 0xeb,
+ 0x58, 0x24, 0xd1, 0x77, 0x34, 0xd6, 0xcd, 0x7c, 0xcb, 0xf8, 0x20, 0xdc, 0xb4, 0xc5, 0x4b, 0xda,
+ 0xc2, 0x86, 0x11, 0x8e, 0x40, 0x35, 0xa6, 0x52, 0x24, 0x71, 0x9f, 0x7a, 0x45, 0xc3, 0xfe, 0xe8,
+ 0xcd, 0xd9, 0xb1, 0x65, 0x08, 0x1b, 0xb6, 0x42, 0x35, 0xf3, 0xe0, 0x25, 0x3b, 0xfc, 0x02, 0xd4,
+ 0x64, 0xd2, 0xcb, 0x02, 0x5e, 0xc9, 0xe8, 0x71, 0xd7, 0x02, 0x6a, 0x4f, 0xf2, 0x10, 0x5e, 0xcd,
+ 0x83, 0x0c, 0xd4, 0xe2, 0x54, 0x49, 0xdd, 0xb5, 0xf7, 0xc1, 0x3b, 0x29, 0x50, 0xd7, 0xa5, 0x70,
+ 0x4e, 0x87, 0x57, 0xb9, 0xe1, 0x0c, 0xd4, 0xad, 0xb9, 0xec, 0xf2, 0xf6, 0x3b, 0x4b, 0x72, 0x77,
+ 0x31, 0x6f, 0xd5, 0xf1, 0x65, 0x5a, 0xbc, 0x5e, 0x07, 0x7e, 0x03, 0xa0, 0x75, 0xad, 0x08, 0xe1,
+ 0xd5, 0x8d, 0x46, 0x4d, 0xab, 0x11, 0xc4, 0x57, 0x32, 0xf0, 0x35, 0x28, 0xd8, 0x06, 0x25, 0x4e,
+ 0xa6, 0xd4, 0xdb, 0x30, 0xe8, 0xe5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x26, 0x02, 0x03, 0xe0, 0xea,
+ 0xbf, 0x32, 0x22, 0x7d, 0xea, 0x95, 0x4d, 0xda, 0x1d, 0x9b, 0xe6, 0x3e, 0xce, 0x02, 0x38, 0xcf,
+ 0x81, 0x5f, 0x02, 0x57, 0x44, 0xfa, 0x55, 0x67, 0x82, 0x7b, 0x15, 0x03, 0xf0, 0x33, 0xc0, 0x51,
+ 0x16, 0xb8, 0x58, 0x35, 0x70, 0x0e, 0x80, 0x4f, 0x41, 0x35, 0x91, 0x34, 0x3e, 0xe4, 0xcf, 0x85,
+ 0x57, 0x35, 0x82, 0x7e, 0x82, 0x56, 0x6f, 0xc8, 0xa5, 0xb5, 0xd7, 0x42, 0x76, 0x6d, 0x76, 0xfe,
+ 0x3e, 0x65, 0x1e, 0xbc, 0x64, 0x82, 0x5d, 0x50, 0x16, 0xbd, 0x9f, 0x69, 0x5f, 0x79, 0xae, 0xe1,
+ 0x7c, 0x70, 0xe3, 0x90, 0xec, 0xd6, 0x22, 0x4c, 0x4e, 0x0f, 0x7e, 0x55, 0x94, 0xeb, 0xf9, 0x84,
+ 0xb7, 0x2d, 0x75, 0xf9, 0xc8, 0x90, 0x60, 0x4b, 0x06, 0x7f, 0x02, 0xae, 0x98, 0x0c, 0x52, 0xa7,
+ 0x07, 0xde, 0x86, 0x79, 0x29, 0xe5, 0x51, 0xc6, 0x83, 0x73, 0x4a, 0xb8, 0x05, 0xca, 0x83, 0x78,
+ 0x86, 0x13, 0xee, 0xd5, 0xda, 0x4e, 0xa7, 0x1a, 0x02, 0xdd, 0xc3, 0xbe, 0xf1, 0x60, 0x1b, 0x81,
+ 0xcf, 0x40, 0x45, 0x44, 0x5a, 0x0c, 0xe9, 0x6d, 0xbe, 0x4d, 0x07, 0x75, 0xdb, 0x41, 0xe5, 0x28,
+ 0x65, 0xc1, 0x19, 0xdd, 0xd6, 0x5f, 0x25, 0x70, 0x67, 0xe5, 0x42, 0xc9, 0x48, 0x70, 0x49, 0xdf,
+ 0xcb, 0x89, 0xfa, 0x14, 0x54, 0xc8, 0x64, 0x22, 0x4e, 0x69, 0x7a, 0xa5, 0xaa, 0x79, 0x13, 0x7b,
+ 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x0c, 0xca, 0x52, 0x11, 0x95, 0x48, 0x7b, 0x71, 0xee, 0xbf, 0xde,
+ 0x7a, 0x3d, 0x31, 0x98, 0x54, 0x30, 0x4c, 0x65, 0x32, 0x51, 0xd8, 0xf2, 0xc0, 0x16, 0xd8, 0x88,
+ 0x88, 0xea, 0x8f, 0xcc, 0x55, 0xd9, 0x0c, 0xdd, 0xc5, 0xbc, 0xb5, 0x71, 0xac, 0x1d, 0x38, 0xf5,
+ 0xc3, 0x5d, 0xe0, 0x9a, 0x87, 0xa7, 0xb3, 0x28, 0x5b, 0x8c, 0xa6, 0x1e, 0xd1, 0x71, 0xe6, 0xbc,
+ 0x58, 0x35, 0x70, 0x9e, 0x0c, 0x7f, 0x77, 0x40, 0x83, 0x24, 0x03, 0xa6, 0xf6, 0x38, 0x17, 0x8a,
+ 0xa4, 0x53, 0x29, 0xb7, 0x8b, 0x9d, 0xda, 0xce, 0x01, 0x7a, 0xd5, 0x97, 0x10, 0x5d, 0xd1, 0x19,
+ 0xed, 0xad, 0xf1, 0x1c, 0x70, 0x15, 0xcf, 0x42, 0xcf, 0x0a, 0xd5, 0x58, 0x0f, 0xe3, 0x2b, 0x85,
+ 0x61, 0x07, 0x54, 0x4f, 0x49, 0xcc, 0x19, 0x1f, 0x4a, 0xaf, 0xd2, 0x2e, 0xea, 0xfd, 0xd6, 0xeb,
+ 0xf1, 0xbd, 0xf5, 0xe1, 0x65, 0xb4, 0xf9, 0x15, 0xf8, 0xe8, 0xda, 0x72, 0xb0, 0x01, 0x8a, 0x63,
+ 0x3a, 0x4b, 0x87, 0x8d, 0xf5, 0x23, 0xfc, 0x10, 0x6c, 0x9c, 0x90, 0x49, 0x42, 0xcd, 0xe0, 0x5c,
+ 0x9c, 0x1a, 0x8f, 0x6e, 0xed, 0x3a, 0x5b, 0x7f, 0x3b, 0xa0, 0xbe, 0xf2, 0x6f, 0x9c, 0x30, 0x7a,
+ 0x0a, 0xbb, 0xa0, 0x62, 0x8f, 0x8e, 0xe1, 0xa8, 0xed, 0xa0, 0xd7, 0x96, 0xc1, 0xa0, 0xc2, 0x9a,
+ 0x7e, 0x29, 0xb2, 0x8b, 0x98, 0x71, 0xc1, 0x1f, 0xcc, 0x87, 0xc8, 0xe8, 0x64, 0x3f, 0x73, 0xc1,
+ 0x1b, 0xca, 0x9b, 0x4a, 0x91, 0x59, 0x78, 0x49, 0x17, 0x86, 0x67, 0xe7, 0x7e, 0xe1, 0xc5, 0xb9,
+ 0x5f, 0x78, 0x79, 0xee, 0x17, 0x7e, 0x5b, 0xf8, 0xce, 0xd9, 0xc2, 0x77, 0x5e, 0x2c, 0x7c, 0xe7,
+ 0xe5, 0xc2, 0x77, 0xfe, 0x59, 0xf8, 0xce, 0x1f, 0xff, 0xfa, 0x85, 0x1f, 0xef, 0xbd, 0xea, 0x47,
+ 0xd0, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x6e, 0x31, 0x41, 0x23, 0x09, 0x00, 0x00,
+}
+
+func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.RequestSubResource)
+ copy(dAtA[i:], m.RequestSubResource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource)))
+ i--
+ dAtA[i] = 0x7a
+ if m.RequestResource != nil {
+ {
+ size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.RequestKind != nil {
+ {
+ size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x62
+ if m.DryRun != nil {
+ i--
+ if *m.DryRun {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x58
+ }
+ {
+ size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ {
+ size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ {
+ size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ i -= len(m.Operation)
+ copy(dAtA[i:], m.Operation)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.SubResource)
+ copy(dAtA[i:], m.SubResource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Warnings) > 0 {
+ for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Warnings[iNdEx])
+ copy(dAtA[i:], m.Warnings[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx])))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.AuditAnnotations) > 0 {
+ keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
+ for k := range m.AuditAnnotations {
+ keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAuditAnnotations[iNdEx])
+ copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.PatchType != nil {
+ i -= len(*m.PatchType)
+ copy(dAtA[i:], *m.PatchType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Patch != nil {
+ i -= len(m.Patch)
+ copy(dAtA[i:], m.Patch)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Result != nil {
+ {
+ size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i--
+ if m.Allowed {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.UID)
+ copy(dAtA[i:], m.UID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *AdmissionReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Response != nil {
+ {
+ size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Request != nil {
+ {
+ size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AdmissionRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Kind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Resource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.SubResource)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Operation)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.UserInfo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Object.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.OldObject.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DryRun != nil {
+ n += 2
+ }
+ l = m.Options.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.RequestKind != nil {
+ l = m.RequestKind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.RequestResource != nil {
+ l = m.RequestResource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.RequestSubResource)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *AdmissionResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.UID)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.Result != nil {
+ l = m.Result.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Patch != nil {
+ l = len(m.Patch)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.PatchType != nil {
+ l = len(*m.PatchType)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.AuditAnnotations) > 0 {
+ for k, v := range m.AuditAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Warnings) > 0 {
+ for _, s := range m.Warnings {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AdmissionReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Request != nil {
+ l = m.Request.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Response != nil {
+ l = m.Response.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AdmissionRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AdmissionRequest{`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`,
+ `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`,
+ `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+ `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`,
+ `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `DryRun:` + valueToStringGenerated(this.DryRun) + `,`,
+ `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+ `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`,
+ `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`,
+ `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AdmissionResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+ for k := range this.AuditAnnotations {
+ keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ mapStringForAuditAnnotations := "map[string]string{"
+ for _, k := range keysForAuditAnnotations {
+ mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+ }
+ mapStringForAuditAnnotations += "}"
+ s := strings.Join([]string{`&AdmissionResponse{`,
+ `UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+ `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+ `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`,
+ `Patch:` + valueToStringGenerated(this.Patch) + `,`,
+ `PatchType:` + valueToStringGenerated(this.PatchType) + `,`,
+ `AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+ `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *AdmissionReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AdmissionReview{`,
+ `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`,
+ `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *AdmissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubResource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Operation = Operation(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.DryRun = &b
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RequestKind == nil {
+ m.RequestKind = &v1.GroupVersionKind{}
+ }
+ if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RequestResource == nil {
+ m.RequestResource = &v1.GroupVersionResource{}
+ }
+ if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RequestSubResource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AdmissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Allowed = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Result == nil {
+ m.Result = &v1.Status{}
+ }
+ if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...)
+ if m.Patch == nil {
+ m.Patch = []byte{}
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := PatchType(dAtA[iNdEx:postIndex])
+ m.PatchType = &s
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuditAnnotations == nil {
+ m.AuditAnnotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.AuditAnnotations[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AdmissionReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Request == nil {
+ m.Request = &AdmissionRequest{}
+ }
+ if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Response == nil {
+ m.Response = &AdmissionResponse{}
+ }
+ if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto
new file mode 100644
index 0000000000..d27c05b727
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto
@@ -0,0 +1,167 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.admission.v1beta1;
+
+import "k8s.io/api/authentication/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/admission/v1beta1";
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+message AdmissionRequest {
+ // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+ // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+ // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+ optional string uid = 1;
+
+ // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
+
+ // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
+
+ // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // +optional
+ optional string subResource = 4;
+
+ // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+ //
+ // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+ // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+ // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+ // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+ // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+ //
+ // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
+
+ // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+ //
+ // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+ // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+ // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+ // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+ // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+ //
+ // See documentation for the "matchPolicy" field in the webhook configuration type.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
+
+ // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+ // See documentation for the "matchPolicy" field in the webhook configuration type.
+ // +optional
+ optional string requestSubResource = 15;
+
+ // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // rely on the server to generate the name. If that is the case, this field will contain an empty string.
+ // +optional
+ optional string name = 5;
+
+ // Namespace is the namespace associated with the request (if any).
+ // +optional
+ optional string namespace = 6;
+
+ // Operation is the operation being performed. This may be different than the operation
+ // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+ optional string operation = 7;
+
+ // UserInfo is information about the requesting user
+ optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
+
+ // Object is the object from the incoming request.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
+
+ // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
+
+ // DryRun indicates that modifications will definitely not be persisted for this request.
+ // Defaults to false.
+ // +optional
+ optional bool dryRun = 11;
+
+ // Options is the operation option structure of the operation being performed.
+ // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+ // different than the options the caller provided. e.g. for a patch request the performed
+ // Operation might be a CREATE, in which case the Options will a
+ // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
+}
+
+// AdmissionResponse describes an admission response.
+message AdmissionResponse {
+ // UID is an identifier for the individual request/response.
+ // This should be copied over from the corresponding AdmissionRequest.
+ optional string uid = 1;
+
+ // Allowed indicates whether or not the admission request was permitted.
+ optional bool allowed = 2;
+
+ // Result contains extra details into why an admission request was denied.
+ // This field IS NOT consulted in any way if "Allowed" is "true".
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
+
+ // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // +optional
+ optional bytes patch = 4;
+
+ // The type of Patch. Currently we only allow "JSONPatch".
+ // +optional
+ optional string patchType = 5;
+
+ // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+ // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+ // the admission webhook to add additional context to the audit log for this request.
+ // +optional
+ map<string, string> auditAnnotations = 6;
+
+ // warnings is a list of warning messages to return to the requesting API client.
+ // Warning messages describe a problem the client making the API request should correct or be aware of.
+ // Limit warnings to 120 characters if possible.
+ // Warnings over 256 characters and large numbers of warnings may be truncated.
+ // +optional
+ repeated string warnings = 7;
+}
+
+// AdmissionReview describes an admission review request/response.
+message AdmissionReview {
+ // Request describes the attributes for the admission request.
+ // +optional
+ optional AdmissionRequest request = 1;
+
+ // Response describes the attributes for the admission response.
+ // +optional
+ optional AdmissionResponse response = 2;
+}
+
diff --git a/vendor/k8s.io/api/admission/v1beta1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go
new file mode 100644
index 0000000000..1c53e755dd
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "admission.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+var (
+ // SchemeBuilder points to a list of functions added to Scheme.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme.
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &AdmissionReview{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go
new file mode 100644
index 0000000000..00c619d998
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ authenticationv1 "k8s.io/api/authentication/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.9
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// This API is never server served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally
+// and having the generator against this group will protect future APIs which may be served.
+// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview
+
+// AdmissionReview describes an admission review request/response.
+type AdmissionReview struct {
+ metav1.TypeMeta `json:",inline"`
+ // Request describes the attributes for the admission request.
+ // +optional
+ Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
+ // Response describes the attributes for the admission response.
+ // +optional
+ Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
+}
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+type AdmissionRequest struct {
+ // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+ // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+ // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+ UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+ // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+ // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+ // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // +optional
+ SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
+
+ // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+ //
+ // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+ // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+ // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+ // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+ // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+ //
+ // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+ // +optional
+ RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
+ // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+ //
+ // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+ // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+ // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+ // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+ // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+ //
+ // See documentation for the "matchPolicy" field in the webhook configuration type.
+ // +optional
+ RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
+ // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+ // See documentation for the "matchPolicy" field in the webhook configuration type.
+ // +optional
+ RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
+
+ // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // rely on the server to generate the name. If that is the case, this field will contain an empty string.
+ // +optional
+ Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
+ // Namespace is the namespace associated with the request (if any).
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+ // Operation is the operation being performed. This may be different than the operation
+ // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+ Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
+ // UserInfo is information about the requesting user
+ UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
+ // Object is the object from the incoming request.
+ // +optional
+ Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
+ // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // +optional
+ OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
+ // DryRun indicates that modifications will definitely not be persisted for this request.
+ // Defaults to false.
+ // +optional
+ DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
+ // Options is the operation option structure of the operation being performed.
+ // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+ // different than the options the caller provided. e.g. for a patch request the performed
+ // Operation might be a CREATE, in which case the Options will a
+ // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+ // +optional
+ Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"`
+}
+
+// AdmissionResponse describes an admission response.
+type AdmissionResponse struct {
+ // UID is an identifier for the individual request/response.
+ // This should be copied over from the corresponding AdmissionRequest.
+ UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+
+ // Allowed indicates whether or not the admission request was permitted.
+ Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+
+ // Result contains extra details into why an admission request was denied.
+ // This field IS NOT consulted in any way if "Allowed" is "true".
+ // +optional
+ Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+
+ // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // +optional
+ Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
+
+ // The type of Patch. Currently we only allow "JSONPatch".
+ // +optional
+ PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
+
+ // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+ // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+ // the admission webhook to add additional context to the audit log for this request.
+ // +optional
+ AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"`
+
+ // warnings is a list of warning messages to return to the requesting API client.
+ // Warning messages describe a problem the client making the API request should correct or be aware of.
+ // Limit warnings to 120 characters if possible.
+ // Warnings over 256 characters and large numbers of warnings may be truncated.
+ // +optional
+ Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"`
+}
+
+// PatchType is the type of patch being used to represent the mutated object
+type PatchType string
+
+// PatchType constants.
+const (
+ PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// Operation is the type of resource operation being checked for admission control
+type Operation string
+
+// Operation constants
+const (
+ Create Operation = "CREATE"
+ Update Operation = "UPDATE"
+ Delete Operation = "DELETE"
+ Connect Operation = "CONNECT"
+)
diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..82598ed573
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,78 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AdmissionRequest = map[string]string{
+ "": "AdmissionRequest describes the admission.Attributes for the admission request.",
+ "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+ "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+ "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)",
+ "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+ "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+ "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
+ "namespace": "Namespace is the namespace associated with the request (if any).",
+ "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+ "userInfo": "UserInfo is information about the requesting user",
+ "object": "Object is the object from the incoming request.",
+ "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+ "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+ "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+}
+
+func (AdmissionRequest) SwaggerDoc() map[string]string {
+ return map_AdmissionRequest
+}
+
+var map_AdmissionResponse = map[string]string{
+ "": "AdmissionResponse describes an admission response.",
+ "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
+ "allowed": "Allowed indicates whether or not the admission request was permitted.",
+ "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+ "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+ "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".",
+ "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+ "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.",
+}
+
+func (AdmissionResponse) SwaggerDoc() map[string]string {
+ return map_AdmissionResponse
+}
+
+var map_AdmissionReview = map[string]string{
+ "": "AdmissionReview describes an admission review request/response.",
+ "request": "Request describes the attributes for the admission request.",
+ "response": "Response describes the attributes for the admission response.",
+}
+
+func (AdmissionReview) SwaggerDoc() map[string]string {
+ return map_AdmissionReview
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..8234b322f9
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,142 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) {
+ *out = *in
+ out.Kind = in.Kind
+ out.Resource = in.Resource
+ if in.RequestKind != nil {
+ in, out := &in.RequestKind, &out.RequestKind
+ *out = new(v1.GroupVersionKind)
+ **out = **in
+ }
+ if in.RequestResource != nil {
+ in, out := &in.RequestResource, &out.RequestResource
+ *out = new(v1.GroupVersionResource)
+ **out = **in
+ }
+ in.UserInfo.DeepCopyInto(&out.UserInfo)
+ in.Object.DeepCopyInto(&out.Object)
+ in.OldObject.DeepCopyInto(&out.OldObject)
+ if in.DryRun != nil {
+ in, out := &in.DryRun, &out.DryRun
+ *out = new(bool)
+ **out = **in
+ }
+ in.Options.DeepCopyInto(&out.Options)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest.
+func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) {
+ *out = *in
+ if in.Result != nil {
+ in, out := &in.Result, &out.Result
+ *out = new(v1.Status)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Patch != nil {
+ in, out := &in.Patch, &out.Patch
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.PatchType != nil {
+ in, out := &in.PatchType, &out.PatchType
+ *out = new(PatchType)
+ **out = **in
+ }
+ if in.AuditAnnotations != nil {
+ in, out := &in.AuditAnnotations, &out.AuditAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Warnings != nil {
+ in, out := &in.Warnings, &out.Warnings
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse.
+func (in *AdmissionResponse) DeepCopy() *AdmissionResponse {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionResponse)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Request != nil {
+ in, out := &in.Request, &out.Request
+ *out = new(AdmissionRequest)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Response != nil {
+ in, out := &in.Response, &out.Response
+ *out = new(AdmissionResponse)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview.
+func (in *AdmissionReview) DeepCopy() *AdmissionReview {
+ if in == nil {
+ return nil
+ }
+ out := new(AdmissionReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 0000000000..f96e8a4433
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,50 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) {
+ return 1, 9
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) {
+ return 1, 19
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) {
+ return 1, 22
+}
diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/k8s.io/apiserver/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
new file mode 100644
index 0000000000..570c51ae99
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package user contains utilities for dealing with simple user exchange in the auth
+// packages. The user.Info interface defines an interface for exchanging that info.
+package user
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
new file mode 100644
index 0000000000..1af6f2b277
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+// Info describes a user that has been authenticated to the system.
+type Info interface {
+ // GetName returns the name that uniquely identifies this user among all
+ // other active users.
+ GetName() string
+ // GetUID returns a unique value for a particular user that will change
+ // if the user is removed from the system and another user is added with
+ // the same name.
+ GetUID() string
+ // GetGroups returns the names of the groups the user is a member of
+ GetGroups() []string
+
+ // GetExtra can contain any additional information that the authenticator
+ // thought was interesting. One example would be scopes on a token.
+ // Keys in this map should be namespaced to the authenticator or
+ // authenticator/authorizer pair making use of them.
+ // For instance: "example.org/foo" instead of "foo"
+ // This is a map[string][]string because it needs to be serializeable into
+ // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization
+ // delegation flows
+ // In order to faithfully round-trip through an impersonation flow, these keys
+ // MUST be lowercase.
+ GetExtra() map[string][]string
+}
+
+// DefaultInfo provides a simple user information exchange object
+// for components that implement the UserInfo interface.
+type DefaultInfo struct {
+ Name string
+ UID string
+ Groups []string
+ Extra map[string][]string
+}
+
+func (i *DefaultInfo) GetName() string {
+ return i.Name
+}
+
+func (i *DefaultInfo) GetUID() string {
+ return i.UID
+}
+
+func (i *DefaultInfo) GetGroups() []string {
+ return i.Groups
+}
+
+func (i *DefaultInfo) GetExtra() map[string][]string {
+ return i.Extra
+}
+
+const (
+ // well-known user and group names
+ SystemPrivilegedGroup = "system:masters"
+ NodesGroup = "system:nodes"
+ MonitoringGroup = "system:monitoring"
+ AllUnauthenticated = "system:unauthenticated"
+ AllAuthenticated = "system:authenticated"
+
+ Anonymous = "system:anonymous"
+ APIServerUser = "system:apiserver"
+
+ // core kubernetes process identities
+ KubeProxy = "system:kube-proxy"
+ KubeControllerManager = "system:kube-controller-manager"
+ KubeScheduler = "system:kube-scheduler"
+
+ // CredentialIDKey is the key used in a user's "extra" to specify the unique
+ // identifier for this identity document).
+ CredentialIDKey = "authentication.kubernetes.io/credential-id"
+)
diff --git a/vendor/k8s.io/component-base/LICENSE b/vendor/k8s.io/component-base/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/k8s.io/component-base/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS
new file mode 100644
index 0000000000..be371a4a09
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - sig-instrumentation-approvers
+ - logicalhan
+ - RainbowMango
+reviewers:
+ - sig-instrumentation-reviewers
+ - YoyinZyc
+labels:
+ - sig/instrumentation
diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go
new file mode 100644
index 0000000000..27a57eb7f8
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/buckets.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// DefBuckets is a wrapper for prometheus.DefBuckets
+var DefBuckets = prometheus.DefBuckets
+
+// LinearBuckets is a wrapper for prometheus.LinearBuckets.
+func LinearBuckets(start, width float64, count int) []float64 {
+ return prometheus.LinearBuckets(start, width, count)
+}
+
+// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ return prometheus.ExponentialBuckets(start, factor, count)
+}
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+ return prometheus.ExponentialBucketsRange(min, max, count)
+}
+
+// MergeBuckets merges buckets together
+func MergeBuckets(buckets ...[]float64) []float64 {
+ result := make([]float64, 1)
+ for _, s := range buckets {
+ result = append(result, s...)
+ }
+ return result
+}
diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go
new file mode 100644
index 0000000000..0718b6e135
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/collector.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// StableCollector extends the prometheus.Collector interface to allow customization of the
+// metric registration process, it's especially intend to be used in scenario of custom collector.
+type StableCollector interface {
+ prometheus.Collector
+
+ // DescribeWithStability sends the super-set of all possible metrics.Desc collected
+ // by this StableCollector to the provided channel.
+ DescribeWithStability(chan<- *Desc)
+
+ // CollectWithStability sends each collected metrics.Metric via the provide channel.
+ CollectWithStability(chan<- Metric)
+
+ // Create will initialize all Desc and it intends to be called by registry.
+ Create(version *semver.Version, self StableCollector) bool
+
+ // ClearState will clear all the states marked by Create.
+ ClearState()
+
+ // HiddenMetrics tells the list of hidden metrics with fqName.
+ HiddenMetrics() []string
+}
+
+// BaseStableCollector which implements almost all methods defined by StableCollector
+// is a convenient assistant for custom collectors.
+// It is recommended to inherit BaseStableCollector when implementing custom collectors.
+type BaseStableCollector struct {
+ descriptors map[string]*Desc // stores all descriptors by pair, these are collected from DescribeWithStability().
+ registerable map[string]*Desc // stores registerable descriptors by pair, is a subset of descriptors.
+ hidden map[string]*Desc // stores hidden descriptors by pair, is a subset of descriptors.
+ self StableCollector
+}
+
+// DescribeWithStability sends all descriptors to the provided channel.
+// Every custom collector should over-write this method.
+func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) {
+ panic(fmt.Errorf("custom collector should over-write DescribeWithStability method"))
+}
+
+// Describe sends all descriptors to the provided channel.
+// It intended to be called by prometheus registry.
+func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) {
+ for _, d := range bsc.registerable {
+ ch <- d.toPrometheusDesc()
+ }
+}
+
+// CollectWithStability sends all metrics to the provided channel.
+// Every custom collector should over-write this method.
+func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) {
+ panic(fmt.Errorf("custom collector should over-write CollectWithStability method"))
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) {
+ mch := make(chan Metric)
+
+ go func() {
+ bsc.self.CollectWithStability(mch)
+ close(mch)
+ }()
+
+ for m := range mch {
+ // nil Metric usually means hidden metrics
+ if m == nil {
+ continue
+ }
+
+ ch <- prometheus.Metric(m)
+ }
+}
+
+func (bsc *BaseStableCollector) add(d *Desc) {
+ if len(d.fqName) == 0 {
+ panic("nameless metrics will be not allowed")
+ }
+
+ if bsc.descriptors == nil {
+ bsc.descriptors = make(map[string]*Desc)
+ }
+
+ if _, exist := bsc.descriptors[d.fqName]; exist {
+ panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName))
+ }
+
+ bsc.descriptors[d.fqName] = d
+}
+
+// Init intends to be called by registry.
+func (bsc *BaseStableCollector) init(self StableCollector) {
+ bsc.self = self
+
+ dch := make(chan *Desc)
+
+ // collect all possible descriptions from custom side
+ go func() {
+ bsc.self.DescribeWithStability(dch)
+ close(dch)
+ }()
+
+ for d := range dch {
+ bsc.add(d)
+ }
+}
+
+func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) {
+ if bsc.registerable == nil {
+ bsc.registerable = make(map[string]*Desc)
+ }
+
+ bsc.registerable[d.fqName] = d
+}
+
+func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) {
+ if bsc.hidden == nil {
+ bsc.hidden = make(map[string]*Desc)
+ }
+
+ bsc.hidden[d.fqName] = d
+}
+
+// Create intends to be called by registry.
+// Create will return true as long as there is one or more metrics not be hidden.
+// Otherwise return false, that means the whole collector will be ignored by registry.
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool {
+ bsc.init(self)
+
+ for _, d := range bsc.descriptors {
+ d.create(version)
+ if d.IsHidden() {
+ bsc.trackHiddenDescriptor(d)
+ } else {
+ bsc.trackRegistrableDescriptor(d)
+ }
+ }
+
+ if len(bsc.registerable) > 0 {
+ return true
+ }
+
+ return false
+}
+
+// ClearState will clear all the states marked by Create.
+// It intends to be used for re-register a hidden metric.
+func (bsc *BaseStableCollector) ClearState() {
+ for _, d := range bsc.descriptors {
+ d.ClearState()
+ }
+
+ bsc.descriptors = nil
+ bsc.registerable = nil
+ bsc.hidden = nil
+ bsc.self = nil
+}
+
+// HiddenMetrics tells the list of hidden metrics with fqName.
+func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) {
+ for i := range bsc.hidden {
+ fqNames = append(fqNames, bsc.hidden[i].fqName)
+ }
+ return
+}
+
+// Check if our BaseStableCollector implements necessary interface
+var _ StableCollector = &BaseStableCollector{}
diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go
new file mode 100644
index 0000000000..e41d5383be
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/counter.go
@@ -0,0 +1,309 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.opentelemetry.io/otel/trace"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is our internal representation for our wrapping struct around prometheus
+// counters. Counter implements both kubeCollector and CounterMetric.
+type Counter struct {
+ ctx context.Context
+ CounterMetric
+ *CounterOpts
+ lazyMetric
+ selfCollector
+}
+
+// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue.
+var _ Metric = &Counter{}
+
+// All supported exemplar metric types implement the metricWithExemplar interface.
+var _ metricWithExemplar = &Counter{}
+
+// exemplarCounterMetric holds a context to extract exemplar labels from, and a counter metric to attach them to. It implements the metricWithExemplar interface.
+type exemplarCounterMetric struct {
+ *Counter
+}
+
+// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
+func NewCounter(opts *CounterOpts) *Counter {
+ opts.StabilityLevel.setDefaults()
+
+ kc := &Counter{
+ CounterOpts: opts,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ kc.setPrometheusCounter(noop)
+ kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+ return kc
+}
+
+func (c *Counter) Desc() *prometheus.Desc {
+ return c.metric.Desc()
+}
+
+func (c *Counter) Write(to *dto.Metric) error {
+ return c.metric.Write(to)
+}
+
+// Reset resets the underlying prometheus Counter to start counting from 0 again
+func (c *Counter) Reset() {
+ if !c.IsCreated() {
+ return
+ }
+ c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts()))
+}
+
+// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement.
+func (c *Counter) setPrometheusCounter(counter prometheus.Counter) {
+ c.CounterMetric = counter
+ c.initSelfCollection(counter)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (c *Counter) DeprecatedVersion() *semver.Version {
+ return parseSemver(c.CounterOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying Counter. Until this method is called
+// the underlying counter is a no-op.
+func (c *Counter) initializeMetric() {
+ c.CounterOpts.annotateStabilityLevel()
+ // this actually creates the underlying prometheus counter.
+ c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts()))
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method
+// is called the underlying counter is a no-op.
+func (c *Counter) initializeDeprecatedMetric() {
+ c.CounterOpts.markDeprecated()
+ c.initializeMetric()
+}
+
+// WithContext allows the normal Counter metric to pass in context.
+func (c *Counter) WithContext(ctx context.Context) CounterMetric {
+ c.ctx = ctx
+ return c.CounterMetric
+}
+
+// withExemplar initializes the exemplarMetric object and sets the exemplar value.
+func (c *Counter) withExemplar(v float64) {
+ (&exemplarCounterMetric{c}).withExemplar(v)
+}
+
+func (c *Counter) Add(v float64) {
+ c.withExemplar(v)
+}
+
+func (c *Counter) Inc() {
+ c.withExemplar(1)
+}
+
+// withExemplar attaches an exemplar to the metric.
+func (e *exemplarCounterMetric) withExemplar(v float64) {
+ if m, ok := e.CounterMetric.(prometheus.ExemplarAdder); ok {
+ maybeSpanCtx := trace.SpanContextFromContext(e.ctx)
+ if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+ exemplarLabels := prometheus.Labels{
+ "trace_id": maybeSpanCtx.TraceID().String(),
+ "span_id": maybeSpanCtx.SpanID().String(),
+ }
+ m.AddWithExemplar(v, exemplarLabels)
+ return
+ }
+ }
+
+ e.CounterMetric.Add(v)
+}
+
+// CounterVec is the internal representation of our wrapping struct around prometheus
+// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric.
+type CounterVec struct {
+ *prometheus.CounterVec
+ *CounterOpts
+ lazyMetric
+ originalLabels []string
+}
+
+var _ kubeCollector = &CounterVec{}
+
+// TODO: make this true: var _ CounterVecMetric = &CounterVec{}
+
+// NewCounterVec returns an object which satisfies the kubeCollector and (almost) CounterVecMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated, and only members extracted after
+// registration will actually measure anything.
+func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec {
+ opts.StabilityLevel.setDefaults()
+
+ fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+ cv := &CounterVec{
+ CounterVec: noopCounterVec,
+ CounterOpts: opts,
+ originalLabels: labels,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ cv.lazyInit(cv, fqName)
+ return cv
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *CounterVec) DeprecatedVersion() *semver.Version {
+ return parseSemver(v.CounterOpts.DeprecatedVersion)
+
+}
+
+// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called
+// the underlying counterVec is a no-op.
+func (v *CounterVec) initializeMetric() {
+ v.CounterOpts.annotateStabilityLevel()
+ v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels)
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called
+// the underlying counterVec is a no-op.
+func (v *CounterVec) initializeDeprecatedMetric() {
+ v.CounterOpts.markDeprecated()
+ v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created IFF the counterVec
+// has been registered to a metrics registry.
+func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric {
+ if !v.IsCreated() {
+ return noop // return no-op counter
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label values to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+ }
+
+ return v.CounterVec.WithLabelValues(lvs...)
+}
+
+// With returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created IFF the counterVec has
+// been registered to a metrics registry.
+func (v *CounterVec) With(labels map[string]string) CounterMetric {
+ if !v.IsCreated() {
+ return noop // return no-op counter
+ }
+
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainLabelMap(labels)
+ }
+
+ return v.CounterVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *CounterVec) Delete(labels map[string]string) bool {
+ if !v.IsCreated() {
+ return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+ }
+ return v.CounterVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *CounterVec) Reset() {
+ if !v.IsCreated() {
+ return
+ }
+
+ v.CounterVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the CounterVec.
+// NOTE: This should only be used in test.
+func (v *CounterVec) ResetLabelAllowLists() {
+ v.initializeLabelAllowListsOnce = sync.Once{}
+ v.LabelValueAllowLists = nil
+}
+
+// WithContext returns wrapped CounterVec with context
+func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext {
+ return &CounterVecWithContext{
+ ctx: ctx,
+ CounterVec: v,
+ }
+}
+
+// CounterVecWithContext is the wrapper of CounterVec with context.
+type CounterVecWithContext struct {
+ *CounterVec
+ ctx context.Context
+}
+
+// WithLabelValues is the wrapper of CounterVec.WithLabelValues.
+func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric {
+ return vc.CounterVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of CounterVec.With.
+func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric {
+ return vc.CounterVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go
new file mode 100644
index 0000000000..2ca9cfa7c2
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/desc.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "k8s.io/klog/v2"
+)
+
+// Desc is a prometheus.Desc extension.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabels holds the constant label name/value pairs attached to this metric.
+ constLabels Labels
+ // variableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+
+ // promDesc is the descriptor used by every Prometheus Metric.
+ promDesc *prometheus.Desc
+ annotatedHelp string
+
+ // stabilityLevel represents the API guarantees for a given defined metric.
+ stabilityLevel StabilityLevel
+ // deprecatedVersion represents in which version this metric will be deprecated.
+ deprecatedVersion string
+
+ isDeprecated bool
+ isHidden bool
+ isCreated bool
+ createLock sync.RWMutex
+ markDeprecationOnce sync.Once
+ createOnce sync.Once
+ deprecateOnce sync.Once
+ hideOnce sync.Once
+ annotateOnce sync.Once
+}
+
+// NewDesc extends prometheus.NewDesc with stability support.
+//
+// The stabilityLevel should be valid stability label, such as "metrics.ALPHA"
+// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA"
+// will be used in case of empty or invalid stability label.
+//
+// The deprecatedVersion represents in which version this metric will be deprecated.
+// The deprecation policy outlined by the control plane metrics stability KEP.
+func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels,
+ stabilityLevel StabilityLevel, deprecatedVersion string) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ annotatedHelp: help,
+ variableLabels: variableLabels,
+ constLabels: constLabels,
+ stabilityLevel: stabilityLevel,
+ deprecatedVersion: deprecatedVersion,
+ }
+ d.stabilityLevel.setDefaults()
+
+ return d
+}
+
+// String formats the Desc as a string.
+// The stability metadata maybe annotated in 'HELP' section if called after registry,
+// otherwise not.
+// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}"
+func (d *Desc) String() string {
+ if d.isCreated {
+ return d.promDesc.String()
+ }
+
+ return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String()
+}
+
+// toPrometheusDesc transform self to prometheus.Desc
+func (d *Desc) toPrometheusDesc() *prometheus.Desc {
+ return d.promDesc
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (d *Desc) DeprecatedVersion() *semver.Version {
+ return parseSemver(d.deprecatedVersion)
+
+}
+
+func (d *Desc) determineDeprecationStatus(version semver.Version) {
+ selfVersion := d.DeprecatedVersion()
+ if selfVersion == nil {
+ return
+ }
+ d.markDeprecationOnce.Do(func() {
+ if selfVersion.LTE(version) {
+ d.isDeprecated = true
+ }
+ if ShouldShowHidden() {
+ klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName)
+ return
+ }
+ if shouldHide(&version, selfVersion) {
+ // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+ // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName)
+ d.isHidden = true
+ }
+ })
+}
+
+// IsHidden returns if metric will be hidden
+func (d *Desc) IsHidden() bool {
+ return d.isHidden
+}
+
+// IsDeprecated returns if metric has been deprecated
+func (d *Desc) IsDeprecated() bool {
+ return d.isDeprecated
+}
+
+// IsCreated returns if metric has been created.
+func (d *Desc) IsCreated() bool {
+ d.createLock.RLock()
+ defer d.createLock.RUnlock()
+
+ return d.isCreated
+}
+
+// create forces the initialization of Desc which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the Desc is deprecated or hidden, no-opting if the Desc should be considered
+// hidden. Furthermore, this function no-opts and returns true if Desc is already
+// created.
+func (d *Desc) create(version *semver.Version) bool {
+ if version != nil {
+ d.determineDeprecationStatus(*version)
+ }
+
+ // let's not create if this metric is slated to be hidden
+ if d.IsHidden() {
+ return false
+ }
+ d.createOnce.Do(func() {
+ d.createLock.Lock()
+ defer d.createLock.Unlock()
+
+ d.isCreated = true
+ if d.IsDeprecated() {
+ d.initializeDeprecatedDesc()
+ } else {
+ d.initialize()
+ }
+ })
+ return d.IsCreated()
+}
+
+// ClearState will clear all the states marked by Create.
+// It intends to be used for re-register a hidden metric.
+func (d *Desc) ClearState() {
+ d.isDeprecated = false
+ d.isHidden = false
+ d.isCreated = false
+
+ d.markDeprecationOnce = *new(sync.Once)
+ d.createOnce = *new(sync.Once)
+ d.deprecateOnce = *new(sync.Once)
+ d.hideOnce = *new(sync.Once)
+ d.annotateOnce = *new(sync.Once)
+
+ d.annotatedHelp = d.help
+ d.promDesc = nil
+}
+
+func (d *Desc) markDeprecated() {
+ d.deprecateOnce.Do(func() {
+ d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp)
+ })
+}
+
+func (d *Desc) annotateStabilityLevel() {
+ d.annotateOnce.Do(func() {
+ d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp)
+ })
+}
+
+func (d *Desc) initialize() {
+ d.annotateStabilityLevel()
+
+ // this actually creates the underlying prometheus desc.
+ d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels))
+}
+
+func (d *Desc) initializeDeprecatedDesc() {
+ d.markDeprecated()
+ d.initialize()
+}
+
+// GetRawDesc will return a new *Desc with original parameters provided to NewDesc().
+//
+// It will be useful in testing scenario that the same Desc be registered to different registry.
+// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created)
+// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B')
+func (d *Desc) GetRawDesc() *Desc {
+ return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion)
+}
diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go
new file mode 100644
index 0000000000..0d6c8b7fbf
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/gauge.go
@@ -0,0 +1,302 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "k8s.io/component-base/version"
+)
+
+// Gauge is our internal representation for our wrapping struct around prometheus
+// gauges. kubeGauge implements both kubeCollector and KubeGauge.
+type Gauge struct {
+ GaugeMetric
+ *GaugeOpts
+ lazyMetric
+ selfCollector
+}
+
+var _ GaugeMetric = &Gauge{}
+var _ Registerable = &Gauge{}
+var _ kubeCollector = &Gauge{}
+
+// NewGauge returns an object which satisfies the kubeCollector, Registerable, and Gauge interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
+func NewGauge(opts *GaugeOpts) *Gauge {
+ opts.StabilityLevel.setDefaults()
+
+ kc := &Gauge{
+ GaugeOpts: opts,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ kc.setPrometheusGauge(noop)
+ kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+ return kc
+}
+
+// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement.
+func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) {
+ g.GaugeMetric = gauge
+ g.initSelfCollection(gauge)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (g *Gauge) DeprecatedVersion() *semver.Version {
+ return parseSemver(g.GaugeOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying Gauge. Until this method is called
+// the underlying gauge is a no-op.
+func (g *Gauge) initializeMetric() {
+ g.GaugeOpts.annotateStabilityLevel()
+ // this actually creates the underlying prometheus gauge.
+ g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts()))
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method
+// is called the underlying gauge is a no-op.
+func (g *Gauge) initializeDeprecatedMetric() {
+ g.GaugeOpts.markDeprecated()
+ g.initializeMetric()
+}
+
+// WithContext allows the normal Gauge metric to pass in context. The context is no-op now.
+func (g *Gauge) WithContext(ctx context.Context) GaugeMetric {
+ return g.GaugeMetric
+}
+
+// GaugeVec is the internal representation of our wrapping struct around prometheus
+// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec.
+type GaugeVec struct {
+ *prometheus.GaugeVec
+ *GaugeOpts
+ lazyMetric
+ originalLabels []string
+}
+
+var _ GaugeVecMetric = &GaugeVec{}
+var _ Registerable = &GaugeVec{}
+var _ kubeCollector = &GaugeVec{}
+
+// NewGaugeVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated, and only members extracted after
+// registration will actually measure anything.
+func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec {
+ opts.StabilityLevel.setDefaults()
+
+ fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+ cv := &GaugeVec{
+ GaugeVec: noopGaugeVec,
+ GaugeOpts: opts,
+ originalLabels: labels,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ cv.lazyInit(cv, fqName)
+ return cv
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *GaugeVec) DeprecatedVersion() *semver.Version {
+ return parseSemver(v.GaugeOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called
+// the underlying gaugeVec is a no-op.
+func (v *GaugeVec) initializeMetric() {
+ v.GaugeOpts.annotateStabilityLevel()
+ v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels)
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called
+// the underlying gaugeVec is a no-op.
+func (v *GaugeVec) initializeDeprecatedMetric() {
+ v.GaugeOpts.markDeprecated()
+ v.initializeMetric()
+}
+
+func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) {
+ if !v.IsCreated() {
+ if v.IsHidden() {
+ return noop, nil
+ }
+ return noop, errNotRegistered // return no-op gauge
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label values to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+ }
+
+ return v.GetMetricWithLabelValues(lvs...)
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the GaugeMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec
+// has been registered to a metrics registry.
+func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric {
+ ans, err := v.WithLabelValuesChecked(lvs...)
+ if err == nil || ErrIsNotRegistered(err) {
+ return ans
+ }
+ panic(err)
+}
+
+func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+ if !v.IsCreated() {
+ if v.IsHidden() {
+ return noop, nil
+ }
+ return noop, errNotRegistered // return no-op gauge
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label map to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainLabelMap(labels)
+ }
+
+ return v.GetMetricWith(labels)
+}
+
+// With returns the GaugeMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has
+// been registered to a metrics registry.
+func (v *GaugeVec) With(labels map[string]string) GaugeMetric {
+ ans, err := v.WithChecked(labels)
+ if err == nil || ErrIsNotRegistered(err) {
+ return ans
+ }
+ panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *GaugeVec) Delete(labels map[string]string) bool {
+ if !v.IsCreated() {
+ return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+ }
+ return v.GaugeVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *GaugeVec) Reset() {
+ if !v.IsCreated() {
+ return
+ }
+
+ v.GaugeVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the GaugeVec.
+// NOTE: This should only be used in test.
+func (v *GaugeVec) ResetLabelAllowLists() {
+ v.initializeLabelAllowListsOnce = sync.Once{}
+ v.LabelValueAllowLists = nil
+}
+
+func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc {
+ g := NewGauge(opts)
+
+ if !g.Create(&v) {
+ return nil
+ }
+
+ return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function)
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts *GaugeOpts, function func() float64) GaugeFunc {
+ v := parseVersion(version.Get())
+
+ return newGaugeFunc(opts, function, v)
+}
+
+// WithContext returns wrapped GaugeVec with context
+func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext {
+ return &GaugeVecWithContext{
+ ctx: ctx,
+ GaugeVec: v,
+ }
+}
+
+func (v *GaugeVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+ return v.WithContext(ctx)
+}
+
+// GaugeVecWithContext is the wrapper of GaugeVec with context.
+type GaugeVecWithContext struct {
+ *GaugeVec
+ ctx context.Context
+}
+
+// WithLabelValues is the wrapper of GaugeVec.WithLabelValues.
+func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric {
+ return vc.GaugeVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of GaugeVec.With.
+func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric {
+ return vc.GaugeVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go
new file mode 100644
index 0000000000..b410951b67
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/histogram.go
@@ -0,0 +1,304 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Histogram is our internal representation for our wrapping struct around prometheus
+// histograms. Summary implements both kubeCollector and ObserverMetric
+type Histogram struct {
+ ctx context.Context
+ ObserverMetric
+ *HistogramOpts
+ lazyMetric
+ selfCollector
+}
+
+// exemplarHistogramMetric holds a context to extract exemplar labels from, and a histogram metric to attach them to. It implements the metricWithExemplar interface.
+type exemplarHistogramMetric struct {
+ *Histogram
+}
+
+type exemplarHistogramVec struct {
+ *HistogramVecWithContext
+ observer prometheus.Observer
+}
+
+func (h *Histogram) Observe(v float64) {
+ h.withExemplar(v)
+}
+
+// withExemplar initializes the exemplarMetric object and sets the exemplar value.
+func (h *Histogram) withExemplar(v float64) {
+ (&exemplarHistogramMetric{h}).withExemplar(v)
+}
+
+// withExemplar attaches an exemplar to the metric.
+func (e *exemplarHistogramMetric) withExemplar(v float64) {
+ if m, ok := e.Histogram.ObserverMetric.(prometheus.ExemplarObserver); ok {
+ maybeSpanCtx := trace.SpanContextFromContext(e.ctx)
+ if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+ exemplarLabels := prometheus.Labels{
+ "trace_id": maybeSpanCtx.TraceID().String(),
+ "span_id": maybeSpanCtx.SpanID().String(),
+ }
+ m.ObserveWithExemplar(v, exemplarLabels)
+ return
+ }
+ }
+
+ e.ObserverMetric.Observe(v)
+}
+
+// NewHistogram returns an object which is Histogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewHistogram(opts *HistogramOpts) *Histogram {
+ opts.StabilityLevel.setDefaults()
+
+ h := &Histogram{
+ HistogramOpts: opts,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ h.setPrometheusHistogram(noopMetric{})
+ h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+ return h
+}
+
+// setPrometheusHistogram sets the underlying histogram object, i.e. the thing that does the measurement.
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+ h.ObserverMetric = histogram
+ h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+ return parseSemver(h.HistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+ h.HistogramOpts.annotateStabilityLevel()
+ // this actually creates the underlying prometheus gauge.
+ h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+ h.HistogramOpts.markDeprecated()
+ h.initializeMetric()
+}
+
+// WithContext allows the normal Histogram metric to pass in context. The context is no-op now.
+func (h *Histogram) WithContext(ctx context.Context) ObserverMetric {
+ h.ctx = ctx
+ return h.ObserverMetric
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+ *prometheus.HistogramVec
+ *HistogramOpts
+ lazyMetric
+ originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.HistogramVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated,
+// and only members extracted after
+// registration will actually measure anything.
+
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+ opts.StabilityLevel.setDefaults()
+
+ fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+ v := &HistogramVec{
+ HistogramVec: noopHistogramVec,
+ HistogramOpts: opts,
+ originalLabels: labels,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ v.lazyInit(v, fqName)
+ return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+ return parseSemver(v.HistogramOpts.DeprecatedVersion)
+}
+
+func (v *HistogramVec) initializeMetric() {
+ v.HistogramOpts.annotateStabilityLevel()
+ v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+ v.HistogramOpts.markDeprecated()
+ v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec
+// has been registered to a metrics registry.
+func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric {
+ if !v.IsCreated() {
+ return noop
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label values to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+ }
+ return v.HistogramVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has
+// been registered to a metrics registry.
+func (v *HistogramVec) With(labels map[string]string) ObserverMetric {
+ if !v.IsCreated() {
+ return noop
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label map to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainLabelMap(labels)
+ }
+
+ return v.HistogramVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *HistogramVec) Delete(labels map[string]string) bool {
+ if !v.IsCreated() {
+ return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+ }
+ return v.HistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *HistogramVec) Reset() {
+ if !v.IsCreated() {
+ return
+ }
+
+ v.HistogramVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the HistogramVec.
+// NOTE: This should only be used in test.
+func (v *HistogramVec) ResetLabelAllowLists() {
+ v.initializeLabelAllowListsOnce = sync.Once{}
+ v.LabelValueAllowLists = nil
+}
+
+// WithContext returns wrapped HistogramVec with context
+func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext {
+ return &HistogramVecWithContext{
+ ctx: ctx,
+ HistogramVec: v,
+ }
+}
+
+// HistogramVecWithContext is the wrapper of HistogramVec with context.
+type HistogramVecWithContext struct {
+ *HistogramVec
+ ctx context.Context
+}
+
+func (h *exemplarHistogramVec) Observe(v float64) {
+ h.withExemplar(v)
+}
+
+func (h *exemplarHistogramVec) withExemplar(v float64) {
+ if m, ok := h.observer.(prometheus.ExemplarObserver); ok {
+ maybeSpanCtx := trace.SpanContextFromContext(h.HistogramVecWithContext.ctx)
+ if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+ m.ObserveWithExemplar(v, prometheus.Labels{
+ "trace_id": maybeSpanCtx.TraceID().String(),
+ "span_id": maybeSpanCtx.SpanID().String(),
+ })
+ return
+ }
+ }
+
+ h.observer.Observe(v)
+}
+
+// WithLabelValues is the wrapper of HistogramVec.WithLabelValues.
+func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) *exemplarHistogramVec {
+ return &exemplarHistogramVec{
+ HistogramVecWithContext: vc,
+ observer: vc.HistogramVec.WithLabelValues(lvs...),
+ }
+}
+
+// With is the wrapper of HistogramVec.With.
+func (vc *HistogramVecWithContext) With(labels map[string]string) *exemplarHistogramVec {
+ return &exemplarHistogramVec{
+ HistogramVecWithContext: vc,
+ observer: vc.HistogramVec.With(labels),
+ }
+}
diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go
new file mode 100644
index 0000000000..2a0d249c20
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/http.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var (
+ processStartedAt time.Time
+)
+
+func init() {
+ processStartedAt = time.Now()
+}
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // HTTPErrorOnError serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError promhttp.HandlerErrorHandling = iota
+
+ // ContinueOnError ignore errors and try to serve as many metrics as possible.
+ // However, if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. In this case, it is highly
+ // recommended to provide other means of detecting errors: By setting an
+ // ErrorLog in HandlerOpts, the errors are logged. By providing a
+ // Registry in HandlerOpts, the exposed metrics include an error counter
+ // "promhttp_metric_handler_errors_total", which can be used for
+ // alerts.
+ ContinueOnError
+
+ // PanicOnError panics upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// HandlerOpts specifies options how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts promhttp.HandlerOpts
+
+func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts {
+ ho.ProcessStartTime = processStartedAt
+ return promhttp.HandlerOpts(*ho)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler {
+ return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts())
+}
+
+// HandlerWithReset return an http.Handler with Reset
+func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler {
+ defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts())
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodDelete {
+ reg.Reset()
+ io.WriteString(w, "metrics reset\n")
+ return
+ }
+ defaultHandler.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go
new file mode 100644
index 0000000000..11af3ae424
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/labels.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Labels represents a collection of label name -> value mappings.
+type Labels prometheus.Labels
diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go
new file mode 100644
index 0000000000..64a430b796
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package legacyregistry
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/collectors"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+
+ "k8s.io/component-base/metrics"
+)
+
+var (
+ defaultRegistry = metrics.NewKubeRegistry()
+ // DefaultGatherer exposes the global registry gatherer
+ DefaultGatherer metrics.Gatherer = defaultRegistry
+ // Reset calls reset on the global registry
+ Reset = defaultRegistry.Reset
+ // MustRegister registers registerable metrics but uses the global registry.
+ MustRegister = defaultRegistry.MustRegister
+ // RawMustRegister registers prometheus collectors but uses the global registry, this
+ // bypasses the metric stability framework
+ //
+ // Deprecated
+ RawMustRegister = defaultRegistry.RawMustRegister
+
+ // Register registers a collectable metric but uses the global registry
+ Register = defaultRegistry.Register
+
+ // Registerer exposes the global registerer
+ Registerer = defaultRegistry.Registerer
+
+ processStart time.Time
+)
+
+func init() {
+ RawMustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
+ RawMustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))
+ defaultRegistry.RegisterMetaMetrics()
+ processStart = time.Now()
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+func Handler() http.Handler {
+ return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{ProcessStartTime: processStart}))
+}
+
+// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes
+// registry reset if the http method is DELETE.
+func HandlerWithReset() http.Handler {
+ return promhttp.InstrumentMetricHandler(
+ prometheus.DefaultRegisterer,
+ metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{ProcessStartTime: processStart}))
+}
+
+// CustomRegister registers a custom collector but uses the global registry.
+func CustomRegister(c metrics.StableCollector) error {
+ err := defaultRegistry.CustomRegister(c)
+
+ //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13)
+ _ = prometheus.Register(c)
+
+ return err
+}
+
+// CustomMustRegister registers custom collectors but uses the global registry.
+func CustomMustRegister(cs ...metrics.StableCollector) {
+ defaultRegistry.CustomMustRegister(cs...)
+
+ for _, c := range cs {
+ prometheus.MustRegister(c)
+ }
+}
diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go
new file mode 100644
index 0000000000..c8b083995a
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/metric.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+
+ promext "k8s.io/component-base/metrics/prometheusextension"
+ "k8s.io/klog/v2"
+)
+
+/*
+kubeCollector extends the prometheus.Collector interface to allow customization of the metric
+registration process. Defer metric initialization until Create() is called, which then
+delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric
+method call depending on whether the metric is deprecated or not.
+*/
+type kubeCollector interface {
+ Collector
+ lazyKubeMetric
+ DeprecatedVersion() *semver.Version
+ // Each collector metric should provide an initialization function
+ // for both deprecated and non-deprecated variants of a metric. This
+ // is necessary since metric instantiation will be deferred
+ // until the metric is actually registered somewhere.
+ initializeMetric()
+ initializeDeprecatedMetric()
+}
+
+/*
+lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected
+to lazily instantiate metrics (i.e defer metric instantiation until when
+the Create() function is explicitly called).
+*/
+type lazyKubeMetric interface {
+ Create(*semver.Version) bool
+ IsCreated() bool
+ IsHidden() bool
+ IsDeprecated() bool
+}
+
+/*
+lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric
+registration time before instantiation. Add it as an anonymous field to a struct that
+implements kubeCollector to get deferred registration behavior. You must call lazyInit
+with the kubeCollector itself as an argument.
+*/
+type lazyMetric struct {
+ fqName string
+ isDeprecated bool
+ isHidden bool
+ isCreated bool
+ createLock sync.RWMutex
+ markDeprecationOnce sync.Once
+ createOnce sync.Once
+ self kubeCollector
+ stabilityLevel StabilityLevel
+}
+
+func (r *lazyMetric) IsCreated() bool {
+ r.createLock.RLock()
+ defer r.createLock.RUnlock()
+ return r.isCreated
+}
+
+// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed
+// to allow lazy initialization for. It should be invoked in the factory function which creates new
+// kubeCollector type objects.
+func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) {
+ r.fqName = fqName
+ r.self = self
+}
+
+// preprocessMetric figures out whether the lazy metric should be hidden or not.
+// This method takes a Version argument which should be the version of the binary in which
+// this code is currently being executed. A metric can be hidden under two conditions:
+// 1. if the metric is deprecated and is outside the grace period (i.e. has been
+// deprecated for more than one release
+// 2. if the metric is manually disabled via a CLI flag.
+//
+// Disclaimer: disabling a metric via a CLI flag has higher precedence than
+// deprecation and will override show-hidden-metrics for the explicitly
+// disabled metric.
+func (r *lazyMetric) preprocessMetric(version semver.Version) {
+ disabledMetricsLock.RLock()
+ defer disabledMetricsLock.RUnlock()
+ // disabling metrics is higher in precedence than showing hidden metrics
+ if _, ok := disabledMetrics[r.fqName]; ok {
+ r.isHidden = true
+ return
+ }
+ selfVersion := r.self.DeprecatedVersion()
+ if selfVersion == nil {
+ return
+ }
+ r.markDeprecationOnce.Do(func() {
+ if selfVersion.LTE(version) {
+ r.isDeprecated = true
+ }
+
+ if ShouldShowHidden() {
+ klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName)
+ return
+ }
+ if shouldHide(&version, selfVersion) {
+ // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+ // klog.Warningf("This metric has been deprecated for more than one release, hiding.")
+ r.isHidden = true
+ }
+ })
+}
+
+func (r *lazyMetric) IsHidden() bool {
+ return r.isHidden
+}
+
+func (r *lazyMetric) IsDeprecated() bool {
+ return r.isDeprecated
+}
+
+// Create forces the initialization of metric which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the metric is deprecated or hidden, no-opting if the metric should be considered
+// hidden. Furthermore, this function no-opts and returns true if metric is already
+// created.
+func (r *lazyMetric) Create(version *semver.Version) bool {
+ if version != nil {
+ r.preprocessMetric(*version)
+ }
+ // let's not create if this metric is slated to be hidden
+ if r.IsHidden() {
+ return false
+ }
+
+ r.createOnce.Do(func() {
+ r.createLock.Lock()
+ defer r.createLock.Unlock()
+ r.isCreated = true
+ if r.IsDeprecated() {
+ r.self.initializeDeprecatedMetric()
+ } else {
+ r.self.initializeMetric()
+ }
+ })
+ sl := r.stabilityLevel
+ deprecatedV := r.self.DeprecatedVersion()
+ dv := ""
+ if deprecatedV != nil {
+ dv = deprecatedV.String()
+ }
+ registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc()
+ return r.IsCreated()
+}
+
+// ClearState will clear all the states marked by Create.
+// It intends to be used for re-register a hidden metric.
+func (r *lazyMetric) ClearState() {
+ r.createLock.Lock()
+ defer r.createLock.Unlock()
+
+ r.isDeprecated = false
+ r.isHidden = false
+ r.isCreated = false
+ r.markDeprecationOnce = sync.Once{}
+ r.createOnce = sync.Once{}
+}
+
+// FQName returns the fully-qualified metric name of the collector.
+func (r *lazyMetric) FQName() string {
+ return r.fqName
+}
+
+/*
+This code is directly lifted from the prometheus codebase. It's a convenience struct which
+allows you satisfy the Collector interface automatically if you already satisfy the Metric interface.
+
+For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120
+*/
+type selfCollector struct {
+ metric prometheus.Metric
+}
+
+func (c *selfCollector) initSelfCollection(m prometheus.Metric) {
+ c.metric = m
+}
+
+func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- c.metric.Desc()
+}
+
+func (c *selfCollector) Collect(ch chan<- prometheus.Metric) {
+ ch <- c.metric
+}
+
+// metricWithExemplar is an interface that knows how to attach an exemplar to certain supported metric types.
+type metricWithExemplar interface {
+ withExemplar(v float64)
+}
+
+// no-op vecs for convenience
+var noopCounterVec = &prometheus.CounterVec{}
+var noopHistogramVec = &prometheus.HistogramVec{}
+var noopTimingHistogramVec = &promext.TimingHistogramVec{}
+var noopGaugeVec = &prometheus.GaugeVec{}
+
+// just use a convenience struct for all the no-ops
+var noop = &noopMetric{}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc() {}
+func (noopMetric) Add(float64) {}
+func (noopMetric) Dec() {}
+func (noopMetric) Set(float64) {}
+func (noopMetric) Sub(float64) {}
+func (noopMetric) Observe(float64) {}
+func (noopMetric) ObserveWithWeight(float64, uint64) {}
+func (noopMetric) SetToCurrentTime() {}
+func (noopMetric) Desc() *prometheus.Desc { return nil }
+func (noopMetric) Write(*dto.Metric) error { return nil }
+func (noopMetric) Describe(chan<- *prometheus.Desc) {}
+func (noopMetric) Collect(chan<- prometheus.Metric) {}
diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go
new file mode 100644
index 0000000000..17f44ef2a3
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/options.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/blang/semver/v4"
+ "github.com/spf13/pflag"
+
+ "k8s.io/component-base/version"
+)
+
+// Options has all parameters needed for exposing metrics from components
+type Options struct {
+ ShowHiddenMetricsForVersion string
+ DisabledMetrics []string
+ AllowListMapping map[string]string
+ AllowListMappingManifest string
+}
+
+// NewOptions returns default metrics options
+func NewOptions() *Options {
+ return &Options{}
+}
+
+// Validate validates metrics flags options.
+func (o *Options) Validate() []error {
+ if o == nil {
+ return nil
+ }
+
+ var errs []error
+ err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ if err := validateAllowMetricLabel(o.AllowListMapping); err != nil {
+ errs = append(errs, err)
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+ return errs
+}
+
+// AddFlags adds flags for exposing component metrics.
+func (o *Options) AddFlags(fs *pflag.FlagSet) {
+ if o == nil {
+ return
+ }
+ fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
+ "The previous version for which you want to show hidden metrics. "+
+ "Only the previous minor version is meaningful, other values will not be allowed. "+
+ "The format is ., e.g.: '1.16'. "+
+ "The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, "+
+ "rather than being surprised when they are permanently removed in the release after that.")
+ fs.StringSliceVar(&o.DisabledMetrics,
+ "disabled-metrics",
+ o.DisabledMetrics,
+ "This flag provides an escape hatch for misbehaving metrics. "+
+ "You must provide the fully qualified metric name in order to disable it. "+
+ "Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.")
+ fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping,
+ "The map from metric-label to value allow-list of this label. The key's format is ,. "+
+ "The value's format is ,..."+
+ "e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+ fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest,
+ "The path to the manifest file that contains the allow-list mapping. "+
+ "The format of the file is the same as the flag --allow-metric-labels. "+
+ "Note that the flag --allow-metric-labels will override the manifest file.")
+}
+
+// Apply applies parameters into global configuration of metrics.
+func (o *Options) Apply() {
+ if o == nil {
+ return
+ }
+ if len(o.ShowHiddenMetricsForVersion) > 0 {
+ SetShowHidden()
+ }
+ // set disabled metrics
+ for _, metricName := range o.DisabledMetrics {
+ SetDisabledMetric(metricName)
+ }
+ if o.AllowListMapping != nil {
+ SetLabelAllowListFromCLI(o.AllowListMapping)
+ } else if len(o.AllowListMappingManifest) > 0 {
+ SetLabelAllowListFromManifest(o.AllowListMappingManifest)
+ }
+}
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+ if targetVersionStr == "" {
+ return nil
+ }
+
+ validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+ if targetVersionStr != validVersionStr {
+ return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+ }
+
+ return nil
+}
+
+func validateAllowMetricLabel(allowListMapping map[string]string) error {
+ if allowListMapping == nil {
+ return nil
+ }
+ metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*`
+ labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*`
+ for k := range allowListMapping {
+ reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
+ if reg.FindString(k) != k {
+ return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName,labelName=labelValue, labelValue,...`")
+ }
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go
new file mode 100644
index 0000000000..247b9fd1c1
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/opts.go
@@ -0,0 +1,390 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+
+ yaml "go.yaml.in/yaml/v2"
+ "k8s.io/apimachinery/pkg/util/sets"
+ promext "k8s.io/component-base/metrics/prometheusextension"
+ "k8s.io/klog/v2"
+)
+
+var (
+ labelValueAllowLists = map[string]*MetricLabelAllowList{}
+ allowListLock sync.RWMutex
+)
+
+// ResetLabelValueAllowLists resets the allow lists for label values.
+// NOTE: This should only be used in test.
+func ResetLabelValueAllowLists() {
+ allowListLock.Lock()
+ defer allowListLock.Unlock()
+ labelValueAllowLists = map[string]*MetricLabelAllowList{}
+}
+
+// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure
+// is purposefully not embedded here because that would change struct initialization
+// in the manner which people are currently accustomed.
+//
+// Name must be set to a non-empty string. DeprecatedVersion is defined only
+// if the metric for which this options applies is, in fact, deprecated.
+type KubeOpts struct {
+ Namespace string
+ Subsystem string
+ Name string
+ Help string
+ ConstLabels map[string]string
+ DeprecatedVersion string
+ deprecateOnce sync.Once
+ annotateOnce sync.Once
+ StabilityLevel StabilityLevel
+ initializeLabelAllowListsOnce sync.Once
+ LabelValueAllowLists *MetricLabelAllowList
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ return prometheus.BuildFQName(namespace, subsystem, name)
+}
+
+// StabilityLevel represents the API guarantees for a given defined metric.
+type StabilityLevel string
+
+const (
+ // INTERNAL metrics have no stability guarantees, as such, labels may
+ // be arbitrarily added/removed and the metric may be deleted at any time.
+ INTERNAL StabilityLevel = "INTERNAL"
+ // ALPHA metrics have no stability guarantees, as such, labels may
+ // be arbitrarily added/removed and the metric may be deleted at any time.
+ ALPHA StabilityLevel = "ALPHA"
+ // BETA metrics are governed by the deprecation policy outlined in by
+ // the control plane metrics stability KEP.
+ BETA StabilityLevel = "BETA"
+ // STABLE metrics are guaranteed not be mutated and removal is governed by
+ // the deprecation policy outlined in by the control plane metrics stability KEP.
+ STABLE StabilityLevel = "STABLE"
+)
+
+// setDefaults takes 'ALPHA' in case of empty.
+func (sl *StabilityLevel) setDefaults() {
+ switch *sl {
+ case "":
+ *sl = ALPHA
+ default:
+ // no-op, since we have a StabilityLevel already
+ }
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts KubeOpts
+
+// Modify help description on the metric description.
+func (o *CounterOpts) markDeprecated() {
+ o.deprecateOnce.Do(func() {
+ o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+ })
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *CounterOpts) annotateStabilityLevel() {
+ o.annotateOnce.Do(func() {
+ o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+ })
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts {
+ return prometheus.CounterOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ }
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts KubeOpts
+
+// Modify help description on the metric description.
+func (o *GaugeOpts) markDeprecated() {
+ o.deprecateOnce.Do(func() {
+ o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+ })
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *GaugeOpts) annotateStabilityLevel() {
+ o.annotateOnce.Do(func() {
+ o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+ })
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts {
+ return prometheus.GaugeOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ }
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+ Namespace string
+ Subsystem string
+ Name string
+ Help string
+ ConstLabels map[string]string
+ Buckets []float64
+ DeprecatedVersion string
+ deprecateOnce sync.Once
+ annotateOnce sync.Once
+ StabilityLevel StabilityLevel
+ initializeLabelAllowListsOnce sync.Once
+ LabelValueAllowLists *MetricLabelAllowList
+}
+
+// Modify help description on the metric description.
+func (o *HistogramOpts) markDeprecated() {
+ o.deprecateOnce.Do(func() {
+ o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+ })
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *HistogramOpts) annotateStabilityLevel() {
+ o.annotateOnce.Do(func() {
+ o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+ })
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts {
+ return prometheus.HistogramOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ Buckets: o.Buckets,
+ }
+}
+
+// TimingHistogramOpts bundles the options for creating a TimingHistogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type TimingHistogramOpts struct {
+ Namespace string
+ Subsystem string
+ Name string
+ Help string
+ ConstLabels map[string]string
+ Buckets []float64
+ InitialValue float64
+ DeprecatedVersion string
+ deprecateOnce sync.Once
+ annotateOnce sync.Once
+ StabilityLevel StabilityLevel
+ initializeLabelAllowListsOnce sync.Once
+ LabelValueAllowLists *MetricLabelAllowList
+}
+
+// Modify help description on the metric description.
+func (o *TimingHistogramOpts) markDeprecated() {
+ o.deprecateOnce.Do(func() {
+ o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+ })
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *TimingHistogramOpts) annotateStabilityLevel() {
+ o.annotateOnce.Do(func() {
+ o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+ })
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts {
+ return promext.TimingHistogramOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ Buckets: o.Buckets,
+ InitialValue: o.InitialValue,
+ }
+}
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+ Namespace string
+ Subsystem string
+ Name string
+ Help string
+ ConstLabels map[string]string
+ Objectives map[float64]float64
+ MaxAge time.Duration
+ AgeBuckets uint32
+ BufCap uint32
+ DeprecatedVersion string
+ deprecateOnce sync.Once
+ annotateOnce sync.Once
+ StabilityLevel StabilityLevel
+ initializeLabelAllowListsOnce sync.Once
+ LabelValueAllowLists *MetricLabelAllowList
+}
+
+// Modify help description on the metric description.
+func (o *SummaryOpts) markDeprecated() {
+ o.deprecateOnce.Do(func() {
+ o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+ })
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *SummaryOpts) annotateStabilityLevel() {
+ o.annotateOnce.Do(func() {
+ o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+ })
+}
+
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v1.0.0 of the library. The default Summary will have no quantiles then.
+var (
+ defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+)
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts {
+ // we need to retain existing quantile behavior for backwards compatibility,
+ // so let's do what prometheus used to do prior to v1.
+ objectives := o.Objectives
+ if objectives == nil {
+ objectives = defObjectives
+ }
+ return prometheus.SummaryOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ Objectives: objectives,
+ MaxAge: o.MaxAge,
+ AgeBuckets: o.AgeBuckets,
+ BufCap: o.BufCap,
+ }
+}
+
+type MetricLabelAllowList struct {
+ labelToAllowList map[string]sets.Set[string]
+}
+
+func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) {
+ for index, value := range labelValueList {
+ name := labelNameList[index]
+ if allowValues, ok := allowList.labelToAllowList[name]; ok {
+ if !allowValues.Has(value) {
+ labelValueList[index] = "unexpected"
+ cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
+ }
+ }
+ }
+}
+
+func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) {
+ for name, value := range labels {
+ if allowValues, ok := allowList.labelToAllowList[name]; ok {
+ if !allowValues.Has(value) {
+ labels[name] = "unexpected"
+ cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
+ }
+ }
+ }
+}
+
+func SetLabelAllowListFromCLI(allowListMapping map[string]string) {
+ allowListLock.Lock()
+ defer allowListLock.Unlock()
+ for metricLabelName, labelValues := range allowListMapping {
+ metricName := strings.Split(metricLabelName, ",")[0]
+ labelName := strings.Split(metricLabelName, ",")[1]
+ valueSet := sets.New[string](strings.Split(labelValues, ",")...)
+
+ allowList, ok := labelValueAllowLists[metricName]
+ if ok {
+ allowList.labelToAllowList[labelName] = valueSet
+ } else {
+ labelToAllowList := make(map[string]sets.Set[string])
+ labelToAllowList[labelName] = valueSet
+ labelValueAllowLists[metricName] = &MetricLabelAllowList{
+ labelToAllowList,
+ }
+ }
+ }
+}
+
+func SetLabelAllowListFromManifest(manifest string) {
+ allowListMapping := make(map[string]string)
+ data, err := os.ReadFile(filepath.Clean(manifest))
+ if err != nil {
+ klog.Errorf("Failed to read allow list manifest: %v", err)
+ return
+ }
+ err = yaml.Unmarshal(data, &allowListMapping)
+ if err != nil {
+ klog.Errorf("Failed to parse allow list manifest: %v", err)
+ return
+ }
+ SetLabelAllowListFromCLI(allowListMapping)
+}
diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go
new file mode 100644
index 0000000000..f4b98f8eb0
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/processstarttime.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "time"
+
+ "k8s.io/klog/v2"
+)
+
+var processStartTime = NewGaugeVec(
+ &GaugeOpts{
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ StabilityLevel: ALPHA,
+ },
+ []string{},
+)
+
+// RegisterProcessStartTime registers the process_start_time_seconds to
+// a prometheus registry. This metric needs to be included to ensure counter
+// data fidelity.
+func RegisterProcessStartTime(registrationFunc func(Registerable) error) error {
+ start, err := GetProcessStart()
+ if err != nil {
+ klog.Errorf("Could not get process start time, %v", err)
+ start = float64(time.Now().Unix())
+ }
+ // processStartTime is a lazy metric which only get initialized after registered.
+ // so we need to register the metric first and then set the value for it
+ if err = registrationFunc(processStartTime); err != nil {
+ return err
+ }
+
+ processStartTime.WithLabelValues().Set(start)
+ return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go
new file mode 100644
index 0000000000..611a12906b
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go
@@ -0,0 +1,39 @@
+//go:build !windows
+// +build !windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "os"
+
+ "github.com/prometheus/procfs"
+)
+
+func GetProcessStart() (float64, error) {
+ pid := os.Getpid()
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return 0, err
+ }
+
+ if stat, err := p.Stat(); err == nil {
+ return stat.StartTime()
+ }
+ return 0, err
+}
diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
new file mode 100644
index 0000000000..afee6f9b13
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
@@ -0,0 +1,34 @@
+//go:build windows
+// +build windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "golang.org/x/sys/windows"
+)
+
+func GetProcessStart() (float64, error) {
+ processHandle := windows.CurrentProcess()
+
+ var creationTime, exitTime, kernelTime, userTime windows.Filetime
+ if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil {
+ return 0, err
+ }
+ return float64(creationTime.Nanoseconds() / 1e9), nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go
new file mode 100644
index 0000000000..be07977e28
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+ "errors"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// GaugeOps is the part of `prometheus.Gauge` that is relevant to
+// instrumented code.
+// This factoring should be in prometheus, analogous to the way
+// it already factors out the Observer interface for histograms and summaries.
+type GaugeOps interface {
+ // Set is the same as Gauge.Set
+ Set(float64)
+ // Inc is the same as Gauge.inc
+ Inc()
+ // Dec is the same as Gauge.Dec
+ Dec()
+ // Add is the same as Gauge.Add
+ Add(float64)
+ // Sub is the same as Gauge.Sub
+ Sub(float64)
+
+ // SetToCurrentTime the same as Gauge.SetToCurrentTime
+ SetToCurrentTime()
+}
+
+// A TimingHistogram tracks how long a `float64` variable spends in
+// ranges defined by buckets. Time is counted in nanoseconds. The
+// histogram's sum is the integral over time (in nanoseconds, from
+// creation of the histogram) of the variable's value.
+type TimingHistogram interface {
+ prometheus.Metric
+ prometheus.Collector
+ GaugeOps
+}
+
+// TimingHistogramOpts is the parameters of the TimingHistogram constructor
+type TimingHistogramOpts struct {
+ Namespace string
+ Subsystem string
+ Name string
+ Help string
+ ConstLabels prometheus.Labels
+
+ // Buckets defines the buckets into which observations are
+ // accumulated. Each element in the slice is the upper
+ // inclusive bound of a bucket. The values must be sorted in
+ // strictly increasing order. There is no need to add a
+ // highest bucket with +Inf bound. The default value is
+ // prometheus.DefBuckets.
+ Buckets []float64
+
+ // The initial value of the variable.
+ InitialValue float64
+}
+
+// NewTimingHistogram creates a new TimingHistogram
+func NewTimingHistogram(opts TimingHistogramOpts) (TimingHistogram, error) {
+ return NewTestableTimingHistogram(time.Now, opts)
+}
+
+// NewTestableTimingHistogram creates a TimingHistogram that uses a mockable clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts TimingHistogramOpts) (TimingHistogram, error) {
+ desc := prometheus.NewDesc(
+ prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ wrapTimingHelp(opts.Help),
+ nil,
+ opts.ConstLabels,
+ )
+ return newTimingHistogram(nowFunc, desc, opts)
+}
+
+func wrapTimingHelp(given string) string {
+ return "EXPERIMENTAL: " + given
+}
+
+func newTimingHistogram(nowFunc func() time.Time, desc *prometheus.Desc, opts TimingHistogramOpts, variableLabelValues ...string) (TimingHistogram, error) {
+ allLabelsM := prometheus.Labels{}
+ allLabelsS := prometheus.MakeLabelPairs(desc, variableLabelValues)
+ for _, pair := range allLabelsS {
+ if pair == nil || pair.Name == nil || pair.Value == nil {
+ return nil, errors.New("prometheus.MakeLabelPairs returned a nil")
+ }
+ allLabelsM[*pair.Name] = *pair.Value
+ }
+ weighted, err := newWeightedHistogram(desc, WeightedHistogramOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: opts.Name,
+ Help: opts.Help,
+ ConstLabels: allLabelsM,
+ Buckets: opts.Buckets,
+ }, variableLabelValues...)
+ if err != nil {
+ return nil, err
+ }
+ return &timingHistogram{
+ nowFunc: nowFunc,
+ weighted: weighted,
+ lastSetTime: nowFunc(),
+ value: opts.InitialValue,
+ }, nil
+}
+
+type timingHistogram struct {
+ nowFunc func() time.Time
+ weighted *weightedHistogram
+
+ // The following fields must only be accessed with weighted's lock held
+
+ lastSetTime time.Time // identifies when value was last set
+ value float64
+}
+
+var _ TimingHistogram = &timingHistogram{}
+
+func (th *timingHistogram) Set(newValue float64) {
+ th.update(func(float64) float64 { return newValue })
+}
+
+func (th *timingHistogram) Inc() {
+ th.update(func(oldValue float64) float64 { return oldValue + 1 })
+}
+
+func (th *timingHistogram) Dec() {
+ th.update(func(oldValue float64) float64 { return oldValue - 1 })
+}
+
+func (th *timingHistogram) Add(delta float64) {
+ th.update(func(oldValue float64) float64 { return oldValue + delta })
+}
+
+func (th *timingHistogram) Sub(delta float64) {
+ th.update(func(oldValue float64) float64 { return oldValue - delta })
+}
+
+func (th *timingHistogram) SetToCurrentTime() {
+ th.update(func(oldValue float64) float64 { return th.nowFunc().Sub(time.Unix(0, 0)).Seconds() })
+}
+
+func (th *timingHistogram) update(updateFn func(float64) float64) {
+ th.weighted.lock.Lock()
+ defer th.weighted.lock.Unlock()
+ now := th.nowFunc()
+ delta := now.Sub(th.lastSetTime)
+ value := th.value
+ if delta > 0 {
+ th.weighted.observeWithWeightLocked(value, uint64(delta))
+ th.lastSetTime = now
+ }
+ th.value = updateFn(value)
+}
+
+func (th *timingHistogram) Desc() *prometheus.Desc {
+ return th.weighted.Desc()
+}
+
+func (th *timingHistogram) Write(dest *dto.Metric) error {
+ th.Add(0) // account for time since last update
+ return th.weighted.Write(dest)
+}
+
+func (th *timingHistogram) Describe(ch chan<- *prometheus.Desc) {
+ ch <- th.weighted.Desc()
+}
+
+func (th *timingHistogram) Collect(ch chan<- prometheus.Metric) {
+ ch <- th
+}
diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go
new file mode 100644
index 0000000000..7af1a45860
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// GaugeVecOps is a bunch of Gauge that have the same
+// Desc and are distinguished by the values for their variable labels.
+type GaugeVecOps interface {
+ GetMetricWith(prometheus.Labels) (GaugeOps, error)
+ GetMetricWithLabelValues(lvs ...string) (GaugeOps, error)
+ With(prometheus.Labels) GaugeOps
+ WithLabelValues(...string) GaugeOps
+ CurryWith(prometheus.Labels) (GaugeVecOps, error)
+ MustCurryWith(prometheus.Labels) GaugeVecOps
+}
+
+type TimingHistogramVec struct {
+ *prometheus.MetricVec
+}
+
+var _ GaugeVecOps = &TimingHistogramVec{}
+var _ prometheus.Collector = &TimingHistogramVec{}
+
+func NewTimingHistogramVec(opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec {
+ return NewTestableTimingHistogramVec(time.Now, opts, labelNames...)
+}
+
+func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec {
+ desc := prometheus.NewDesc(
+ prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ wrapTimingHelp(opts.Help),
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &TimingHistogramVec{
+ MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
+ metric, err := newTimingHistogram(nowFunc, desc, opts, lvs...)
+ if err != nil {
+ panic(err) // like in prometheus.newHistogram
+ }
+ return metric
+ }),
+ }
+}
+
+func (hv *TimingHistogramVec) GetMetricWith(labels prometheus.Labels) (GaugeOps, error) {
+ metric, err := hv.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(GaugeOps), err
+ }
+ return nil, err
+}
+
+func (hv *TimingHistogramVec) GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) {
+ metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(GaugeOps), err
+ }
+ return nil, err
+}
+
+func (hv *TimingHistogramVec) With(labels prometheus.Labels) GaugeOps {
+ h, err := hv.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+func (hv *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeOps {
+ h, err := hv.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+func (hv *TimingHistogramVec) CurryWith(labels prometheus.Labels) (GaugeVecOps, error) {
+ vec, err := hv.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &TimingHistogramVec{MetricVec: vec}, err
+ }
+ return nil, err
+}
+
+func (hv *TimingHistogramVec) MustCurryWith(labels prometheus.Labels) GaugeVecOps {
+ vec, err := hv.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go
new file mode 100644
index 0000000000..a060019b25
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WeightedHistogram generalizes Histogram: each observation has
+// an associated _weight_. For a given `x` and `N`,
+// `1` call on `ObserveWithWeight(x, N)` has the same meaning as
+// `N` calls on `ObserveWithWeight(x, 1)`.
+// The weighted sum might differ slightly due to the use of
+// floating point, although the implementation takes some steps
+// to mitigate that.
+// If every weight were 1,
+// this would be the same as the existing Histogram abstraction.
+type WeightedHistogram interface {
+ prometheus.Metric
+ prometheus.Collector
+ WeightedObserver
+}
+
+// WeightedObserver generalizes the Observer interface.
+type WeightedObserver interface {
+ // Set the variable to the given value with the given weight.
+ ObserveWithWeight(value float64, weight uint64)
+}
+
+// WeightedHistogramOpts is the same as for an ordinary Histogram
+type WeightedHistogramOpts = prometheus.HistogramOpts
+
+// NewWeightedHistogram creates a new WeightedHistogram
+func NewWeightedHistogram(opts WeightedHistogramOpts) (WeightedHistogram, error) {
+ desc := prometheus.NewDesc(
+ prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ wrapWeightedHelp(opts.Help),
+ nil,
+ opts.ConstLabels,
+ )
+ return newWeightedHistogram(desc, opts)
+}
+
+func wrapWeightedHelp(given string) string {
+ return "EXPERIMENTAL: " + given
+}
+
+func newWeightedHistogram(desc *prometheus.Desc, opts WeightedHistogramOpts, variableLabelValues ...string) (*weightedHistogram, error) {
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = prometheus.DefBuckets
+ }
+
+ for i, upperBound := range opts.Buckets {
+ if i < len(opts.Buckets)-1 {
+ if upperBound >= opts.Buckets[i+1] {
+ return nil, fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, opts.Buckets[i+1],
+ )
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ opts.Buckets = opts.Buckets[:i]
+ }
+ }
+ }
+ upperBounds := make([]float64, len(opts.Buckets))
+ copy(upperBounds, opts.Buckets)
+
+ return &weightedHistogram{
+ desc: desc,
+ variableLabelValues: variableLabelValues,
+ upperBounds: upperBounds,
+ buckets: make([]uint64, len(upperBounds)+1),
+ hotCount: initialHotCount,
+ }, nil
+}
+
+type weightedHistogram struct {
+ desc *prometheus.Desc
+ variableLabelValues []string
+ upperBounds []float64 // exclusive of +Inf
+
+ lock sync.Mutex // applies to all the following
+
+ // buckets is longer by one than upperBounds.
+ // For 0 <= idx < len(upperBounds), buckets[idx] holds the
+ // accumulated time.Duration that value has been <=
+ // upperBounds[idx] but not <= upperBounds[idx-1].
+ // buckets[len(upperBounds)] holds the accumulated
+ // time.Duration when value fit in no other bucket.
+ buckets []uint64
+
+ // sumHot + sumCold is the weighted sum of value.
+ // Rather than risk loss of precision in one
+ // float64, we do this sum hierarchically. Many successive
+ // increments are added into sumHot; once in a while
+ // the magnitude of sumHot is compared to the magnitude
+ // of sumCold and, if the ratio is high enough,
+ // sumHot is transferred into sumCold.
+ sumHot float64
+ sumCold float64
+
+ transferThreshold float64 // = math.Abs(sumCold) / 2^26 (that's about half of the bits of precision in a float64)
+
+ // hotCount is used to decide when to consider dumping sumHot into sumCold.
+ // hotCount counts upward from initialHotCount to zero.
+ hotCount int
+}
+
+// initialHotCount is the negative of the number of terms
+// that are summed into sumHot before considering whether
+// to transfer to sumCold. This only has to be big enough
+// to make the extra floating point operations occur in a
+// distinct minority of cases.
+const initialHotCount = -15
+
+var _ WeightedHistogram = &weightedHistogram{}
+var _ prometheus.Metric = &weightedHistogram{}
+var _ prometheus.Collector = &weightedHistogram{}
+
+func (sh *weightedHistogram) ObserveWithWeight(value float64, weight uint64) {
+ idx := sort.SearchFloat64s(sh.upperBounds, value)
+ sh.lock.Lock()
+ defer sh.lock.Unlock()
+ sh.updateLocked(idx, value, weight)
+}
+
+func (sh *weightedHistogram) observeWithWeightLocked(value float64, weight uint64) {
+ idx := sort.SearchFloat64s(sh.upperBounds, value)
+ sh.updateLocked(idx, value, weight)
+}
+
+func (sh *weightedHistogram) updateLocked(idx int, value float64, weight uint64) {
+ sh.buckets[idx] += weight
+ newSumHot := sh.sumHot + float64(weight)*value
+ sh.hotCount++
+ if sh.hotCount >= 0 {
+ sh.hotCount = initialHotCount
+ if math.Abs(newSumHot) > sh.transferThreshold {
+ newSumCold := sh.sumCold + newSumHot
+ sh.sumCold = newSumCold
+ sh.transferThreshold = math.Abs(newSumCold / 67108864)
+ sh.sumHot = 0
+ return
+ }
+ }
+ sh.sumHot = newSumHot
+}
+
+func (sh *weightedHistogram) Desc() *prometheus.Desc {
+ return sh.desc
+}
+
+func (sh *weightedHistogram) Write(dest *dto.Metric) error {
+ count, sum, buckets := func() (uint64, float64, map[float64]uint64) {
+ sh.lock.Lock()
+ defer sh.lock.Unlock()
+ nBounds := len(sh.upperBounds)
+ buckets := make(map[float64]uint64, nBounds)
+ var count uint64
+ for idx, upperBound := range sh.upperBounds {
+ count += sh.buckets[idx]
+ buckets[upperBound] = count
+ }
+ count += sh.buckets[nBounds]
+ return count, sh.sumHot + sh.sumCold, buckets
+ }()
+ metric, err := prometheus.NewConstHistogram(sh.desc, count, sum, buckets, sh.variableLabelValues...)
+ if err != nil {
+ return err
+ }
+ return metric.Write(dest)
+}
+
+func (sh *weightedHistogram) Describe(ch chan<- *prometheus.Desc) {
+ ch <- sh.desc
+}
+
+func (sh *weightedHistogram) Collect(ch chan<- prometheus.Metric) {
+ ch <- sh
+}
diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go
new file mode 100644
index 0000000000..2ca95f0a7f
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// WeightedObserverVec is a bunch of WeightedObservers that have the same
+// Desc and are distinguished by the values for their variable labels.
+type WeightedObserverVec interface {
+ GetMetricWith(prometheus.Labels) (WeightedObserver, error)
+ GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error)
+ With(prometheus.Labels) WeightedObserver
+ WithLabelValues(...string) WeightedObserver
+ CurryWith(prometheus.Labels) (WeightedObserverVec, error)
+ MustCurryWith(prometheus.Labels) WeightedObserverVec
+}
+
+// WeightedHistogramVec implements WeightedObserverVec
+type WeightedHistogramVec struct {
+ *prometheus.MetricVec
+}
+
+var _ WeightedObserverVec = &WeightedHistogramVec{}
+var _ prometheus.Collector = &WeightedHistogramVec{}
+
+func NewWeightedHistogramVec(opts WeightedHistogramOpts, labelNames ...string) *WeightedHistogramVec {
+ desc := prometheus.NewDesc(
+ prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ wrapWeightedHelp(opts.Help),
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &WeightedHistogramVec{
+ MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
+ metric, err := newWeightedHistogram(desc, opts, lvs...)
+ if err != nil {
+ panic(err) // like in prometheus.newHistogram
+ }
+ return metric
+ }),
+ }
+}
+
+func (hv *WeightedHistogramVec) GetMetricWith(labels prometheus.Labels) (WeightedObserver, error) {
+ metric, err := hv.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(WeightedObserver), err
+ }
+ return nil, err
+}
+
+func (hv *WeightedHistogramVec) GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) {
+ metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(WeightedObserver), err
+ }
+ return nil, err
+}
+
+func (hv *WeightedHistogramVec) With(labels prometheus.Labels) WeightedObserver {
+ h, err := hv.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+func (hv *WeightedHistogramVec) WithLabelValues(lvs ...string) WeightedObserver {
+ h, err := hv.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+func (hv *WeightedHistogramVec) CurryWith(labels prometheus.Labels) (WeightedObserverVec, error) {
+ vec, err := hv.MetricVec.CurryWith(labels)
+ if vec != nil {
+ return &WeightedHistogramVec{MetricVec: vec}, err
+ }
+ return nil, err
+}
+
+func (hv *WeightedHistogramVec) MustCurryWith(labels prometheus.Labels) WeightedObserverVec {
+ vec, err := hv.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go
new file mode 100644
index 0000000000..203813e814
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/registry.go
@@ -0,0 +1,394 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+
+ apimachineryversion "k8s.io/apimachinery/pkg/version"
+ "k8s.io/component-base/version"
+)
+
+var (
+ showHiddenOnce sync.Once
+ disabledMetricsLock sync.RWMutex
+ showHidden atomic.Bool
+ registries []*kubeRegistry // stores all registries created by NewKubeRegistry()
+ registriesLock sync.RWMutex
+ disabledMetrics = map[string]struct{}{}
+
+ registeredMetricsTotal = NewCounterVec(
+ &CounterOpts{
+ Name: "registered_metrics_total",
+ Help: "The count of registered metrics broken by stability level and deprecation version.",
+ StabilityLevel: BETA,
+ },
+ []string{"stability_level", "deprecated_version"},
+ )
+
+ disabledMetricsTotal = NewCounter(
+ &CounterOpts{
+ Name: "disabled_metrics_total",
+ Help: "The count of disabled metrics.",
+ StabilityLevel: BETA,
+ },
+ )
+
+ hiddenMetricsTotal = NewCounter(
+ &CounterOpts{
+ Name: "hidden_metrics_total",
+ Help: "The count of hidden metrics.",
+ StabilityLevel: BETA,
+ },
+ )
+
+ cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter(
+ &CounterOpts{
+ Name: "cardinality_enforcement_unexpected_categorizations_total",
+ Help: "The count of unexpected categorizations during cardinality enforcement.",
+ StabilityLevel: ALPHA,
+ },
+ )
+)
+
+// shouldHide be used to check if a specific metric with deprecated version should be hidden
+// according to metrics deprecation lifecycle.
+func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool {
+ guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor))
+ if err != nil {
+ panic("failed to make version from current version")
+ }
+
+ if deprecatedVersion.LT(guardVersion) {
+ return true
+ }
+
+ return false
+}
+
+// ValidateShowHiddenMetricsVersion checks invalid version for which show hidden metrics.
+func ValidateShowHiddenMetricsVersion(v string) []error {
+ err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v)
+ if err != nil {
+ return []error{err}
+ }
+
+ return nil
+}
+
+func SetDisabledMetric(name string) {
+ disabledMetricsLock.Lock()
+ defer disabledMetricsLock.Unlock()
+ disabledMetrics[name] = struct{}{}
+ disabledMetricsTotal.Inc()
+}
+
+// SetShowHidden will enable showing hidden metrics. This will no-opt
+// after the initial call
+func SetShowHidden() {
+ showHiddenOnce.Do(func() {
+ showHidden.Store(true)
+
+ // re-register collectors that has been hidden in phase of last registry.
+ for _, r := range registries {
+ r.enableHiddenCollectors()
+ r.enableHiddenStableCollectors()
+ }
+ })
+}
+
+// ShouldShowHidden returns whether showing hidden deprecated metrics
+// is enabled. While the primary usecase for this is internal (to determine
+// registration behavior) this can also be used to introspect
+func ShouldShowHidden() bool {
+ return showHidden.Load()
+}
+
+// Registerable is an interface for a collector metric which we
+// will register with KubeRegistry.
+type Registerable interface {
+ prometheus.Collector
+
+ // Create will mark deprecated state for the collector
+ Create(version *semver.Version) bool
+
+ // ClearState will clear all the states marked by Create.
+ ClearState()
+
+ // FQName returns the fully-qualified metric name of the collector.
+ FQName() string
+}
+
+type resettable interface {
+ Reset()
+}
+
+// KubeRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type KubeRegistry interface {
+ // Deprecated
+ RawMustRegister(...prometheus.Collector)
+ // CustomRegister is our internal variant of Prometheus registry.Register
+ CustomRegister(c StableCollector) error
+ // CustomMustRegister is our internal variant of Prometheus registry.MustRegister
+ CustomMustRegister(cs ...StableCollector)
+ // Register conforms to Prometheus registry.Register
+ Register(Registerable) error
+ // MustRegister conforms to Prometheus registry.MustRegister
+ MustRegister(...Registerable)
+ // Unregister conforms to Prometheus registry.Unregister
+ Unregister(collector Collector) bool
+ // Gather conforms to Prometheus gatherer.Gather
+ Gather() ([]*dto.MetricFamily, error)
+ // Reset invokes the Reset() function on all items in the registry
+ // which are added as resettables.
+ Reset()
+ // RegisterMetaMetrics registers metrics about the number of registered metrics.
+ RegisterMetaMetrics()
+ // Registerer exposes the underlying prometheus registerer
+ Registerer() prometheus.Registerer
+ // Gatherer exposes the underlying prometheus gatherer
+ Gatherer() prometheus.Gatherer
+}
+
+// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization
+// the kubernetes binary version information is loaded into the registry object, so that
+// automatic behavior can be configured for metric versioning.
+type kubeRegistry struct {
+ PromRegistry
+ version semver.Version
+ hiddenCollectors map[string]Registerable // stores all collectors that has been hidden
+ stableCollectors []StableCollector // stores all stable collector
+ hiddenCollectorsLock sync.RWMutex
+ stableCollectorsLock sync.RWMutex
+ resetLock sync.RWMutex
+ resettables []resettable
+}
+
+// Register registers a new Collector to be included in metrics
+// collection. It returns an error if the descriptors provided by the
+// Collector are invalid or if they — in combination with descriptors of
+// already registered Collectors — do not fulfill the consistency and
+// uniqueness criteria described in the documentation of metric.Desc.
+func (kr *kubeRegistry) Register(c Registerable) error {
+ if c.Create(&kr.version) {
+ defer kr.addResettable(c)
+ return kr.PromRegistry.Register(c)
+ }
+
+ kr.trackHiddenCollector(c)
+ return nil
+}
+
+// Registerer exposes the underlying prometheus.Registerer
+func (kr *kubeRegistry) Registerer() prometheus.Registerer {
+ return kr.PromRegistry
+}
+
+// Gatherer exposes the underlying prometheus.Gatherer
+func (kr *kubeRegistry) Gatherer() prometheus.Gatherer {
+ return kr.PromRegistry
+}
+
+// MustRegister works like Register but registers any number of
+// Collectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) MustRegister(cs ...Registerable) {
+ metrics := make([]prometheus.Collector, 0, len(cs))
+ for _, c := range cs {
+ if c.Create(&kr.version) {
+ metrics = append(metrics, c)
+ kr.addResettable(c)
+ } else {
+ kr.trackHiddenCollector(c)
+ }
+ }
+ kr.PromRegistry.MustRegister(metrics...)
+}
+
+// CustomRegister registers a new custom collector.
+func (kr *kubeRegistry) CustomRegister(c StableCollector) error {
+ kr.trackStableCollectors(c)
+ defer kr.addResettable(c)
+ if c.Create(&kr.version, c) {
+ return kr.PromRegistry.Register(c)
+ }
+ return nil
+}
+
+// CustomMustRegister works like CustomRegister but registers any number of
+// StableCollectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) {
+ kr.trackStableCollectors(cs...)
+ collectors := make([]prometheus.Collector, 0, len(cs))
+ for _, c := range cs {
+ if c.Create(&kr.version, c) {
+ kr.addResettable(c)
+ collectors = append(collectors, c)
+ }
+ }
+ kr.PromRegistry.MustRegister(collectors...)
+}
+
+// RawMustRegister takes a native prometheus.Collector and registers the collector
+// to the registry. This bypasses metrics safety checks, so should only be used
+// to register custom prometheus collectors.
+//
+// Deprecated
+func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) {
+ kr.PromRegistry.MustRegister(cs...)
+ for _, c := range cs {
+ kr.addResettable(c)
+ }
+}
+
+// addResettable will automatically add our metric to our reset
+// list if it satisfies the interface
+func (kr *kubeRegistry) addResettable(i interface{}) {
+ kr.resetLock.Lock()
+ defer kr.resetLock.Unlock()
+ if resettable, ok := i.(resettable); ok {
+ kr.resettables = append(kr.resettables, resettable)
+ }
+}
+
+// Unregister unregisters the Collector that equals the Collector passed
+// in as an argument. (Two Collectors are considered equal if their
+// Describe method yields the same set of descriptors.) The function
+// returns whether a Collector was unregistered. Note that an unchecked
+// Collector cannot be unregistered (as its Describe method does not
+// yield any descriptor).
+func (kr *kubeRegistry) Unregister(collector Collector) bool {
+ return kr.PromRegistry.Unregister(collector)
+}
+
+// Gather calls the Collect method of the registered Collectors and then
+// gathers the collected metrics into a lexicographically sorted slice
+// of uniquely named MetricFamily protobufs. Gather ensures that the
+// returned slice is valid and self-consistent so that it can be used
+// for valid exposition. As an exception to the strict consistency
+// requirements described for metric.Desc, Gather will tolerate
+// different sets of label names for metrics of the same metric family.
+func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) {
+ return kr.PromRegistry.Gather()
+}
+
+// trackHiddenCollector stores all hidden collectors.
+func (kr *kubeRegistry) trackHiddenCollector(c Registerable) {
+ kr.hiddenCollectorsLock.Lock()
+ defer kr.hiddenCollectorsLock.Unlock()
+
+ kr.hiddenCollectors[c.FQName()] = c
+ hiddenMetricsTotal.Inc()
+}
+
+// trackStableCollectors stores all custom collectors.
+func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) {
+ kr.stableCollectorsLock.Lock()
+ defer kr.stableCollectorsLock.Unlock()
+
+ kr.stableCollectors = append(kr.stableCollectors, cs...)
+}
+
+// enableHiddenCollectors will re-register all of the hidden collectors.
+func (kr *kubeRegistry) enableHiddenCollectors() {
+ if len(kr.hiddenCollectors) == 0 {
+ return
+ }
+
+ kr.hiddenCollectorsLock.Lock()
+ cs := make([]Registerable, 0, len(kr.hiddenCollectors))
+
+ for _, c := range kr.hiddenCollectors {
+ c.ClearState()
+ cs = append(cs, c)
+ }
+
+ kr.hiddenCollectors = make(map[string]Registerable)
+ kr.hiddenCollectorsLock.Unlock()
+ kr.MustRegister(cs...)
+}
+
+// enableHiddenStableCollectors will re-register the stable collectors if there is one or more hidden metrics in it.
+// Since we can not register a metrics twice, so we have to unregister first then register again.
+func (kr *kubeRegistry) enableHiddenStableCollectors() {
+ if len(kr.stableCollectors) == 0 {
+ return
+ }
+
+ kr.stableCollectorsLock.Lock()
+
+ cs := make([]StableCollector, 0, len(kr.stableCollectors))
+ for _, c := range kr.stableCollectors {
+ if len(c.HiddenMetrics()) > 0 {
+ kr.Unregister(c) // unregister must happens before clear state, otherwise no metrics would be unregister
+ c.ClearState()
+ cs = append(cs, c)
+ }
+ }
+
+ kr.stableCollectors = nil
+ kr.stableCollectorsLock.Unlock()
+ kr.CustomMustRegister(cs...)
+}
+
+// Reset invokes Reset on all metrics that are resettable.
+func (kr *kubeRegistry) Reset() {
+ kr.resetLock.RLock()
+ defer kr.resetLock.RUnlock()
+ for _, r := range kr.resettables {
+ r.Reset()
+ }
+}
+
+// BuildVersion is a helper function that can be easily mocked.
+var BuildVersion = version.Get
+
+func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
+ r := &kubeRegistry{
+ PromRegistry: prometheus.NewRegistry(),
+ version: parseVersion(v),
+ hiddenCollectors: make(map[string]Registerable),
+ resettables: make([]resettable, 0),
+ }
+
+ registriesLock.Lock()
+ defer registriesLock.Unlock()
+ registries = append(registries, r)
+
+ return r
+}
+
+// NewKubeRegistry creates a new vanilla Registry
+func NewKubeRegistry() KubeRegistry {
+ r := newKubeRegistry(BuildVersion())
+ return r
+}
+
+func (r *kubeRegistry) RegisterMetaMetrics() {
+ r.MustRegister(registeredMetricsTotal)
+ r.MustRegister(disabledMetricsTotal)
+ r.MustRegister(hiddenMetricsTotal)
+ r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal)
+}
diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go
new file mode 100644
index 0000000000..3654e4ea09
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/summary.go
@@ -0,0 +1,249 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "sync"
+
+ "github.com/blang/semver/v4"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ DefAgeBuckets = prometheus.DefAgeBuckets
+ DefBufCap = prometheus.DefBufCap
+ DefMaxAge = prometheus.DefMaxAge
+)
+
+// Summary is our internal representation for our wrapping struct around prometheus
+// summaries. Summary implements both kubeCollector and ObserverMetric
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type Summary struct {
+ ObserverMetric
+ *SummaryOpts
+ lazyMetric
+ selfCollector
+}
+
+// NewSummary returns an object which is Summary-like. However, nothing
+// will be measured until the summary is registered somewhere.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummary(opts *SummaryOpts) *Summary {
+ opts.StabilityLevel.setDefaults()
+
+ s := &Summary{
+ SummaryOpts: opts,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ s.setPrometheusSummary(noopMetric{})
+ s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+ return s
+}
+
+// setPrometheusSummary sets the underlying prometheus.Summary object, i.e. the thing that does the measurement.
+func (s *Summary) setPrometheusSummary(summary prometheus.Summary) {
+ s.ObserverMetric = summary
+ s.initSelfCollection(summary)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (s *Summary) DeprecatedVersion() *semver.Version {
+ return parseSemver(s.SummaryOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Summary object instantiation
+// and stores a reference to it
+func (s *Summary) initializeMetric() {
+ s.SummaryOpts.annotateStabilityLevel()
+ // this actually creates the underlying prometheus summary.
+ s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation
+// but modifies the Help description prior to object instantiation.
+func (s *Summary) initializeDeprecatedMetric() {
+ s.SummaryOpts.markDeprecated()
+ s.initializeMetric()
+}
+
+// WithContext allows the normal Summary metric to pass in context. The context is no-op now.
+func (s *Summary) WithContext(ctx context.Context) ObserverMetric {
+ return s.ObserverMetric
+}
+
+// SummaryVec is the internal representation of our wrapping struct around prometheus
+// summaryVecs.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type SummaryVec struct {
+ *prometheus.SummaryVec
+ *SummaryOpts
+ lazyMetric
+ originalLabels []string
+}
+
+// NewSummaryVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.SummaryVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated,
+// and only members extracted after
+// registration will actually measure anything.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
+ opts.StabilityLevel.setDefaults()
+
+ fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+ v := &SummaryVec{
+ SummaryOpts: opts,
+ originalLabels: labels,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ v.lazyInit(v, fqName)
+ return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *SummaryVec) DeprecatedVersion() *semver.Version {
+ return parseSemver(v.SummaryOpts.DeprecatedVersion)
+}
+
+func (v *SummaryVec) initializeMetric() {
+ v.SummaryOpts.annotateStabilityLevel()
+ v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels)
+}
+
+func (v *SummaryVec) initializeDeprecatedMetric() {
+ v.SummaryOpts.markDeprecated()
+ v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec
+// has been registered to a metrics registry.
+func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric {
+ if !v.IsCreated() {
+ return noop
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label values to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+ }
+ return v.SummaryVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has
+// been registered to a metrics registry.
+func (v *SummaryVec) With(labels map[string]string) ObserverMetric {
+ if !v.IsCreated() {
+ return noop
+ }
+
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainLabelMap(labels)
+ }
+ return v.SummaryVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *SummaryVec) Delete(labels map[string]string) bool {
+ if !v.IsCreated() {
+ return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+ }
+ return v.SummaryVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *SummaryVec) Reset() {
+ if !v.IsCreated() {
+ return
+ }
+
+ v.SummaryVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the SummaryVec.
+// NOTE: This should only be used in test.
+func (v *SummaryVec) ResetLabelAllowLists() {
+ v.initializeLabelAllowListsOnce = sync.Once{}
+ v.LabelValueAllowLists = nil
+}
+
+// WithContext returns wrapped SummaryVec with context
+func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext {
+ return &SummaryVecWithContext{
+ ctx: ctx,
+ SummaryVec: v,
+ }
+}
+
+// SummaryVecWithContext is the wrapper of SummaryVec with context.
+type SummaryVecWithContext struct {
+ *SummaryVec
+ ctx context.Context
+}
+
+// WithLabelValues is the wrapper of SummaryVec.WithLabelValues.
+func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric {
+ return vc.SummaryVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of SummaryVec.With.
+func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric {
+ return vc.SummaryVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/timing_histogram.go b/vendor/k8s.io/component-base/metrics/timing_histogram.go
new file mode 100644
index 0000000000..5399f8d1b7
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/timing_histogram.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/blang/semver/v4"
+ promext "k8s.io/component-base/metrics/prometheusextension"
+)
+
+// PrometheusTimingHistogram is the abstraction of the underlying histogram
+// that we want to promote from the wrapper.
+type PrometheusTimingHistogram interface {
+ GaugeMetric
+}
+
+// TimingHistogram is our internal representation for our wrapping struct around
+// timing histograms. It implements both kubeCollector and GaugeMetric
+type TimingHistogram struct {
+ PrometheusTimingHistogram
+ *TimingHistogramOpts
+ nowFunc func() time.Time
+ lazyMetric
+ selfCollector
+}
+
+var _ GaugeMetric = &TimingHistogram{}
+var _ Registerable = &TimingHistogram{}
+var _ kubeCollector = &TimingHistogram{}
+
+// NewTimingHistogram returns an object which is TimingHistogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewTimingHistogram(opts *TimingHistogramOpts) *TimingHistogram {
+ return NewTestableTimingHistogram(time.Now, opts)
+}
+
+// NewTestableTimingHistogram adds injection of the clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts *TimingHistogramOpts) *TimingHistogram {
+ opts.StabilityLevel.setDefaults()
+
+ h := &TimingHistogram{
+ TimingHistogramOpts: opts,
+ nowFunc: nowFunc,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ h.setPrometheusHistogram(noopMetric{})
+ h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+ return h
+}
+
+// setPrometheusHistogram sets the underlying promext.TimingHistogram object, i.e. the thing that does the measurement.
+func (h *TimingHistogram) setPrometheusHistogram(histogram promext.TimingHistogram) {
+ h.PrometheusTimingHistogram = histogram
+ h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *TimingHistogram) DeprecatedVersion() *semver.Version {
+ return parseSemver(h.TimingHistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *TimingHistogram) initializeMetric() {
+ h.TimingHistogramOpts.annotateStabilityLevel()
+ // this actually creates the underlying timing histogram.
+ histogram, err := promext.NewTestableTimingHistogram(h.nowFunc, h.TimingHistogramOpts.toPromHistogramOpts())
+ if err != nil {
+ panic(err) // handle as for regular histograms
+ }
+ h.setPrometheusHistogram(histogram)
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *TimingHistogram) initializeDeprecatedMetric() {
+ h.TimingHistogramOpts.markDeprecated()
+ h.initializeMetric()
+}
+
+// WithContext allows the normal TimingHistogram metric to pass in context. The context is no-op now.
+func (h *TimingHistogram) WithContext(ctx context.Context) GaugeMetric {
+ return h.PrometheusTimingHistogram
+}
+
+// TimingHistogramVec is the internal representation of our wrapping struct around prometheus
+// TimingHistogramVecs.
+type TimingHistogramVec struct {
+ *promext.TimingHistogramVec
+ *TimingHistogramOpts
+ nowFunc func() time.Time
+ lazyMetric
+ originalLabels []string
+}
+
+var _ GaugeVecMetric = &TimingHistogramVec{}
+var _ Registerable = &TimingHistogramVec{}
+var _ kubeCollector = &TimingHistogramVec{}
+
+// NewTimingHistogramVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces
+// and wraps an underlying promext.TimingHistogramVec object. Note well the way that
+// behavior depends on registration and whether this is hidden.
+func NewTimingHistogramVec(opts *TimingHistogramOpts, labels []string) *TimingHistogramVec {
+ return NewTestableTimingHistogramVec(time.Now, opts, labels)
+}
+
+// NewTestableTimingHistogramVec adds injection of the clock.
+func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogramOpts, labels []string) *TimingHistogramVec {
+ opts.StabilityLevel.setDefaults()
+
+ fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+ v := &TimingHistogramVec{
+ TimingHistogramVec: noopTimingHistogramVec,
+ TimingHistogramOpts: opts,
+ nowFunc: nowFunc,
+ originalLabels: labels,
+ lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+ }
+ v.lazyInit(v, fqName)
+ return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *TimingHistogramVec) DeprecatedVersion() *semver.Version {
+ return parseSemver(v.TimingHistogramOpts.DeprecatedVersion)
+}
+
+func (v *TimingHistogramVec) initializeMetric() {
+ v.TimingHistogramOpts.annotateStabilityLevel()
+ v.TimingHistogramVec = promext.NewTestableTimingHistogramVec(v.nowFunc, v.TimingHistogramOpts.toPromHistogramOpts(), v.originalLabels...)
+}
+
+func (v *TimingHistogramVec) initializeDeprecatedMetric() {
+ v.TimingHistogramOpts.markDeprecated()
+ v.initializeMetric()
+}
+
+// WithLabelValuesChecked, if called before this vector has been registered in
+// at least one registry, will return a noop gauge and
+// an error that passes ErrIsNotRegistered.
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) {
+ if !v.IsCreated() {
+ if v.IsHidden() {
+ return noop, nil
+ }
+ return noop, errNotRegistered
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label values to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+ }
+ ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ return noop, err
+ }
+ return ops.(GaugeMetric), err
+}
+
+// WithLabelValues calls WithLabelValuesChecked
+// and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeMetric {
+ ans, err := v.WithLabelValuesChecked(lvs...)
+ if err == nil || ErrIsNotRegistered(err) {
+ return ans
+ }
+ panic(err)
+}
+
+// WithChecked, if called before this vector has been registered in
+// at least one registry, will return a noop gauge and
+// an error that passes ErrIsNotRegistered.
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+ if !v.IsCreated() {
+ if v.IsHidden() {
+ return noop, nil
+ }
+ return noop, errNotRegistered
+ }
+
+ // Initialize label allow lists if not already initialized
+ v.initializeLabelAllowListsOnce.Do(func() {
+ allowListLock.RLock()
+ if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+ v.LabelValueAllowLists = allowList
+ }
+ allowListLock.RUnlock()
+ })
+
+ // Constrain label map to allowed values
+ if v.LabelValueAllowLists != nil {
+ v.LabelValueAllowLists.ConstrainLabelMap(labels)
+ }
+ ops, err := v.TimingHistogramVec.GetMetricWith(labels)
+ if err != nil {
+ return noop, err
+ }
+ return ops.(GaugeMetric), err
+}
+
+// With calls WithChecked and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) With(labels map[string]string) GaugeMetric {
+ ans, err := v.WithChecked(labels)
+ if err == nil || ErrIsNotRegistered(err) {
+ return ans
+ }
+ panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *TimingHistogramVec) Delete(labels map[string]string) bool {
+ if !v.IsCreated() {
+ return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+ }
+ return v.TimingHistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimingHistogramVec) Reset() {
+ if !v.IsCreated() {
+ return
+ }
+
+ v.TimingHistogramVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the TimingHistogramVec.
+// NOTE: This should only be used in test.
+func (v *TimingHistogramVec) ResetLabelAllowLists() {
+ v.initializeLabelAllowListsOnce = sync.Once{}
+ v.LabelValueAllowLists = nil
+}
+
+// InterfaceWithContext returns a wrapped TimingHistogramVec with context
+func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+ return &TimingHistogramVecWithContext{
+ ctx: ctx,
+ TimingHistogramVec: v,
+ }
+}
+
+// TimingHistogramVecWithContext is the wrapper of TimingHistogramVec with context.
+// Currently the context is ignored.
+type TimingHistogramVecWithContext struct {
+ *TimingHistogramVec
+ ctx context.Context
+}
diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go
new file mode 100644
index 0000000000..4a405048cf
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/value.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+func (vt *ValueType) toPromValueType() prometheus.ValueType {
+ return prometheus.ValueType(*vt)
+}
+
+// NewLazyConstMetric is a helper of prometheus.MustNewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ if desc.IsHidden() {
+ return nil
+ }
+ return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewConstMetric is a helper of prometheus.NewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, nil is returned and the metric will not be created.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.IsHidden() {
+ return nil, nil
+ }
+ return prometheus.NewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp.
+//
+// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(),
+// otherwise, no stability guarantees would be offered.
+func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric {
+ if m == nil {
+ return nil
+ }
+
+ return prometheus.NewMetricWithTimestamp(t, m)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go
new file mode 100644
index 0000000000..f963e205eb
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/version"
+
+var (
+ buildInfo = NewGaugeVec(
+ &GaugeOpts{
+ Name: "kubernetes_build_info",
+ Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.",
+ StabilityLevel: ALPHA,
+ },
+ []string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"},
+ )
+)
+
+// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus
+func RegisterBuildInfo(r KubeRegistry) {
+ info := version.Get()
+ r.MustRegister(buildInfo)
+ buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go
new file mode 100644
index 0000000000..102e108e2b
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version_parser.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/blang/semver/v4"
+
+ apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+const (
+ versionRegexpString = `^v(\d+\.\d+\.\d+)`
+)
+
+var (
+ versionRe = regexp.MustCompile(versionRegexpString)
+)
+
+func parseSemver(s string) *semver.Version {
+ if s != "" {
+ sv := semver.MustParse(s)
+ return &sv
+ }
+ return nil
+}
+func parseVersion(ver apimachineryversion.Info) semver.Version {
+ matches := versionRe.FindAllStringSubmatch(ver.String(), -1)
+
+ if len(matches) != 1 {
+ panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String()))
+ }
+ return semver.MustParse(matches[0][1])
+}
diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go
new file mode 100644
index 0000000000..679590aad9
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/wrappers.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "errors"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+// This file contains a series of interfaces which we explicitly define for
+// integrating with prometheus. We redefine the interfaces explicitly here
+// so that we can prevent breakage if methods are ever added to prometheus
+// variants of them.
+
+// Collector defines a subset of prometheus.Collector interface methods
+type Collector interface {
+ Describe(chan<- *prometheus.Desc)
+ Collect(chan<- prometheus.Metric)
+}
+
+// Metric defines a subset of prometheus.Metric interface methods
+type Metric interface {
+ Desc() *prometheus.Desc
+ Write(*dto.Metric) error
+}
+
+// CounterMetric is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+
+// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter
+type CounterMetric interface {
+ Inc()
+ Add(float64)
+}
+
+// CounterVecMetric is an interface which prometheus.CounterVec satisfies.
+type CounterVecMetric interface {
+ WithLabelValues(...string) CounterMetric
+ With(prometheus.Labels) CounterMetric
+}
+
+// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge
+type GaugeMetric interface {
+ Set(float64)
+ Inc()
+ Dec()
+ Add(float64)
+ Write(out *dto.Metric) error
+ SetToCurrentTime()
+}
+
+// GaugeVecMetric is a collection of Gauges that differ only in label values.
+type GaugeVecMetric interface {
+ // Default Prometheus Vec behavior is that member extraction results in creation of a new element
+ // if one with the unique label values is not found in the underlying stored metricMap.
+ // This means that if this function is called but the underlying metric is not registered
+ // (which means it will never be exposed externally nor consumed), the metric would exist in memory
+ // for perpetuity (i.e. throughout application lifecycle).
+ //
+ // For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208
+ //
+ // In contrast, the Vec behavior in this package is that member extraction before registration
+ // returns a permanent noop object.
+
+ // WithLabelValuesChecked, if called before this vector has been registered in
+ // at least one registry, will return a noop gauge and
+ // an error that passes ErrIsNotRegistered.
+ // If called on a hidden vector,
+ // will return a noop gauge and a nil error.
+ // If called with a syntactic problem in the labels, will
+ // return a noop gauge and an error about the labels.
+ // If none of the above apply, this method will return
+ // the appropriate vector member and a nil error.
+ WithLabelValuesChecked(labelValues ...string) (GaugeMetric, error)
+
+ // WithLabelValues calls WithLabelValuesChecked
+ // and handles errors as follows.
+ // An error that passes ErrIsNotRegistered is ignored
+ // and the noop gauge is returned;
+ // all other errors cause a panic.
+ WithLabelValues(labelValues ...string) GaugeMetric
+
+ // WithChecked, if called before this vector has been registered in
+ // at least one registry, will return a noop gauge and
+ // an error that passes ErrIsNotRegistered.
+ // If called on a hidden vector,
+ // will return a noop gauge and a nil error.
+ // If called with a syntactic problem in the labels, will
+ // return a noop gauge and an error about the labels.
+ // If none of the above apply, this method will return
+ // the appropriate vector member and a nil error.
+ WithChecked(labels map[string]string) (GaugeMetric, error)
+
+ // With calls WithChecked and handles errors as follows.
+ // An error that passes ErrIsNotRegistered is ignored
+ // and the noop gauge is returned;
+ // all other errors cause a panic.
+ With(labels map[string]string) GaugeMetric
+
+ // Delete asserts that the vec should have no member for the given label set.
+ // The returned bool indicates whether there was a change.
+ // The return will certainly be `false` if the given label set has the wrong
+ // set of label names.
+ Delete(map[string]string) bool
+
+ // Reset removes all the members
+ Reset()
+}
+
+// ObserverMetric captures individual observations.
+type ObserverMetric interface {
+ Observe(float64)
+}
+
+// PromRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type PromRegistry interface {
+ Register(prometheus.Collector) error
+ MustRegister(...prometheus.Collector)
+ Unregister(prometheus.Collector) bool
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies.
+type Gatherer interface {
+ prometheus.Gatherer
+}
+
+// Registerer is the interface for the part of a registry in charge of registering
+// the collected metrics.
+type Registerer interface {
+ prometheus.Registerer
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+func ErrIsNotRegistered(err error) bool {
+ return err == errNotRegistered
+}
+
+var errNotRegistered = errors.New("metric vec is not registered yet")
diff --git a/vendor/k8s.io/component-base/version/OWNERS b/vendor/k8s.io/component-base/version/OWNERS
new file mode 100644
index 0000000000..08c7aea9c7
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/OWNERS
@@ -0,0 +1,16 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Currently assigned this directory to sig-api-machinery since this is
+# an interface to the version definition in "k8s.io/apimachinery/pkg/version",
+# and also to sig-release since this version information is set up for
+# each release.
+
+approvers:
+ - sig-api-machinery-api-approvers
+ - release-engineering-approvers
+reviewers:
+ - sig-api-machinery-api-reviewers
+ - release-managers
+labels:
+ - sig/api-machinery
+ - sig/release
diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go
new file mode 100644
index 0000000000..b5e889019e
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/base.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are set by the build process with ldflags -X. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+var (
+ // TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+ // instead. First step in deprecation, keep the fields but make
+ // them irrelevant. (Next we'll take it out, which may muck with
+ // scripts consuming the kubectl version output - but most of
+ // these should be looking at gitVersion already anyways.)
+ gitMajor string // major version, always numeric
+ gitMinor string // minor version, numeric possibly followed by "+"
+
+ // semantic version, derived by build scripts (see
+ // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
+ // for a detailed discussion of this field)
+ //
+ // TODO: This field is still called "gitVersion" for legacy
+ // reasons. For prerelease versions, the build metadata on the
+ // semantic version is a git hash, but the version itself is no
+ // longer the direct output of "git describe", but a slight
+ // translation to be semver compliant.
+
+ // NOTE: The $Format strings are replaced during 'git archive' thanks to the
+ // companion .gitattributes file containing 'export-subst' in this same
+ // directory. See also https://git-scm.com/docs/gitattributes
+ gitVersion = "v0.0.0-master+$Format:%H$"
+ gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+ gitTreeState = "" // state of git tree, either "clean" or "dirty"
+
+ buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
+
+const (
+ // DefaultKubeBinaryVersion is the hard coded k8 binary version based on the latest K8s release.
+ // It is supposed to be consistent with gitMajor and gitMinor, except for local tests, where gitMajor and gitMinor are "".
+ // Should update for each minor release!
+ DefaultKubeBinaryVersion = "1.34"
+)
diff --git a/vendor/k8s.io/component-base/version/dynamic.go b/vendor/k8s.io/component-base/version/dynamic.go
new file mode 100644
index 0000000000..46ade9f5ec
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/dynamic.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ utilversion "k8s.io/apimachinery/pkg/util/version"
+)
+
+var dynamicGitVersion atomic.Value
+
+func init() {
+ // initialize to static gitVersion
+ dynamicGitVersion.Store(gitVersion)
+}
+
+// SetDynamicVersion overrides the version returned as the GitVersion from Get().
+// The specified version must be non-empty, a valid semantic version, and must
+// match the major/minor/patch version of the default gitVersion.
+func SetDynamicVersion(dynamicVersion string) error {
+ if err := ValidateDynamicVersion(dynamicVersion); err != nil {
+ return err
+ }
+ dynamicGitVersion.Store(dynamicVersion)
+ return nil
+}
+
+// ValidateDynamicVersion ensures the given version is non-empty, a valid semantic version,
+// and matched the major/minor/patch version of the default gitVersion.
+func ValidateDynamicVersion(dynamicVersion string) error {
+ return validateDynamicVersion(dynamicVersion, gitVersion)
+}
+
+func validateDynamicVersion(dynamicVersion, defaultVersion string) error {
+ if len(dynamicVersion) == 0 {
+ return fmt.Errorf("version must not be empty")
+ }
+ if dynamicVersion == defaultVersion {
+ // allow no-op
+ return nil
+ }
+ vRuntime, err := utilversion.ParseSemantic(dynamicVersion)
+ if err != nil {
+ return err
+ }
+ // must match major/minor/patch of default version
+ var vDefault *utilversion.Version
+ if defaultVersion == "v0.0.0-master+$Format:%H$" {
+ // special-case the placeholder value which doesn't parse as a semantic version
+ vDefault, err = utilversion.ParseSemantic("v0.0.0-master")
+ } else {
+ vDefault, err = utilversion.ParseSemantic(defaultVersion)
+ }
+ if err != nil {
+ return err
+ }
+ if vRuntime.Major() != vDefault.Major() || vRuntime.Minor() != vDefault.Minor() || vRuntime.Patch() != vDefault.Patch() {
+ return fmt.Errorf("version %q must match major/minor/patch of default version %q", dynamicVersion, defaultVersion)
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go
new file mode 100644
index 0000000000..c71ccf1969
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/version.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "fmt"
+ "runtime"
+
+ apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+// The caller should use BinaryMajor and BinaryMinor to determine
+// the binary version. The Major and Minor fields are still set by git version for backwards compatibility.
+func Get() apimachineryversion.Info {
+ // These variables typically come from -ldflags settings and in
+ // their absence fallback to the settings in ./base.go
+ return apimachineryversion.Info{
+ Major: gitMajor,
+ Minor: gitMinor,
+ GitVersion: dynamicGitVersion.Load().(string),
+ GitCommit: gitCommit,
+ GitTreeState: gitTreeState,
+ BuildDate: buildDate,
+ GoVersion: runtime.Version(),
+ Compiler: runtime.Compiler,
+ Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/LICENSE b/vendor/k8s.io/kube-aggregator/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go
new file mode 100644
index 0000000000..e1a1ab796d
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=apiregistration.k8s.io
+
+// Package apiregistration is the internal version of the API.
+package apiregistration
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go
new file mode 100644
index 0000000000..dfa7460080
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiregistration
+
+import (
+ "sort"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/version"
+)
+
+// SortedByGroupAndVersion sorts APIServices into their different groups, and then sorts them based on their versions.
+// For example, the first element of the first array contains the APIService with the highest version number, in the
+// group with the highest priority; while the last element of the last array contains the APIService with the lowest
+// version number, in the group with the lowest priority.
+func SortedByGroupAndVersion(servers []*APIService) [][]*APIService {
+ serversByGroupPriorityMinimum := ByGroupPriorityMinimum(servers)
+ sort.Sort(serversByGroupPriorityMinimum)
+
+ ret := [][]*APIService{}
+ for _, curr := range serversByGroupPriorityMinimum {
+ // check to see if we already have an entry for this group
+ existingIndex := -1
+ for j, groupInReturn := range ret {
+ if groupInReturn[0].Spec.Group == curr.Spec.Group {
+ existingIndex = j
+ break
+ }
+ }
+
+ if existingIndex >= 0 {
+ ret[existingIndex] = append(ret[existingIndex], curr)
+ sort.Sort(ByVersionPriority(ret[existingIndex]))
+ continue
+ }
+
+ ret = append(ret, []*APIService{curr})
+ }
+
+ return ret
+}
+
+// ByGroupPriorityMinimum sorts with the highest group number first, then by name.
+// This is not a simple reverse, because we want the name sorting to be alpha, not
+// reverse alpha.
+type ByGroupPriorityMinimum []*APIService
+
+func (s ByGroupPriorityMinimum) Len() int { return len(s) }
+func (s ByGroupPriorityMinimum) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s ByGroupPriorityMinimum) Less(i, j int) bool {
+ if s[i].Spec.GroupPriorityMinimum != s[j].Spec.GroupPriorityMinimum {
+ return s[i].Spec.GroupPriorityMinimum > s[j].Spec.GroupPriorityMinimum
+ }
+ return s[i].Name < s[j].Name
+}
+
+// ByVersionPriority sorts with the highest version number first, then by name.
+// This is not a simple reverse, because we want the name sorting to be alpha, not
+// reverse alpha.
+type ByVersionPriority []*APIService
+
+func (s ByVersionPriority) Len() int { return len(s) }
+func (s ByVersionPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s ByVersionPriority) Less(i, j int) bool {
+ if s[i].Spec.VersionPriority != s[j].Spec.VersionPriority {
+ return s[i].Spec.VersionPriority > s[j].Spec.VersionPriority
+ }
+ return version.CompareKubeAwareVersionStrings(s[i].Spec.Version, s[j].Spec.Version) > 0
+}
+
+// NewLocalAvailableAPIServiceCondition returns a condition for an available local APIService.
+func NewLocalAvailableAPIServiceCondition() APIServiceCondition {
+ return APIServiceCondition{
+ Type: Available,
+ Status: ConditionTrue,
+ LastTransitionTime: metav1.Now(),
+ Reason: "Local",
+ Message: "Local APIServices are always available",
+ }
+}
+
+// GetAPIServiceConditionByType gets an *APIServiceCondition by APIServiceConditionType if present
+func GetAPIServiceConditionByType(apiService *APIService, conditionType APIServiceConditionType) *APIServiceCondition {
+ for i := range apiService.Status.Conditions {
+ if apiService.Status.Conditions[i].Type == conditionType {
+ return &apiService.Status.Conditions[i]
+ }
+ }
+ return nil
+}
+
+// SetAPIServiceCondition sets the status condition. It either overwrites the existing one or
+// creates a new one
+func SetAPIServiceCondition(apiService *APIService, newCondition APIServiceCondition) {
+ existingCondition := GetAPIServiceConditionByType(apiService, newCondition.Type)
+ if existingCondition == nil {
+ apiService.Status.Conditions = append(apiService.Status.Conditions, newCondition)
+ return
+ }
+
+ if existingCondition.Status != newCondition.Status {
+ existingCondition.Status = newCondition.Status
+ existingCondition.LastTransitionTime = newCondition.LastTransitionTime
+ }
+
+ existingCondition.Reason = newCondition.Reason
+ existingCondition.Message = newCondition.Message
+}
+
+// IsAPIServiceConditionTrue indicates if the condition is present and strictly true
+func IsAPIServiceConditionTrue(apiService *APIService, conditionType APIServiceConditionType) bool {
+ condition := GetAPIServiceConditionByType(apiService, conditionType)
+ return condition != nil && condition.Status == ConditionTrue
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go
new file mode 100644
index 0000000000..7b88df42fa
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiregistration
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the API group for apiregistration
+const GroupName = "apiregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &APIService{},
+ &APIServiceList{},
+ )
+ return nil
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go
new file mode 100644
index 0000000000..97411783f3
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiregistration
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+ metav1.TypeMeta
+ metav1.ListMeta
+
+ Items []APIService
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+ // Namespace is the namespace of the service
+ Namespace string
+ // Name is the name of the service
+ Name string
+ // If specified, the port on the service that hosting the service.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ // +optional
+ Port int32
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+ // Service is a reference to the service for this API server. It must communicate
+ // on port 443.
+ // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+ // The call will simply delegate to the normal handler chain to be fulfilled.
+ // +optional
+ Service *ServiceReference
+ // Group is the API group name this server hosts
+ Group string
+ // Version is the API version this server hosts. For example, "v1"
+ Version string
+
+ // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+ // This is strongly discouraged. You should use the CABundle instead.
+ InsecureSkipTLSVerify bool
+ // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ // +listType=atomic
+ // +optional
+ CABundle []byte
+
+ // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+ // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+ // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+ // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+ // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+ // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+ GroupPriorityMinimum int32
+
+ // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+ // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+ // Since it's inside of a group, the number can be small, probably in the 10s.
+ // In case of equal version priorities, the version string will be used to compute the order inside a group.
+ // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+ // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+ // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+ // version, then minor version. An example sorted list of versions:
+ // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+ VersionPriority int32
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+ // Available indicates that the service exists and is reachable
+ Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes conditions for an APIService
+type APIServiceCondition struct {
+ // Type is the type of the condition.
+ Type APIServiceConditionType
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ Status ConditionStatus
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ Reason string
+ // Human-readable message indicating details about last transition.
+ Message string
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+ // Current service state of apiService.
+ // +listType=map
+ // +listMapKey=type
+ Conditions []APIServiceCondition
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+type APIService struct {
+ metav1.TypeMeta
+ metav1.ObjectMeta
+
+ // Spec contains information for locating and communicating with a server
+ Spec APIServiceSpec
+ // Status contains derived information about an API server
+ Status APIServiceStatus
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go
new file mode 100644
index 0000000000..4a31f94cee
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+// SetDefaults_ServiceReference sets defaults for AuditSync Webhook's ServiceReference
+func SetDefaults_ServiceReference(obj *ServiceReference) {
+ if obj.Port == nil {
+ obj.Port = ptr.To[int32](443)
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go
new file mode 100644
index 0000000000..d3dfe0689e
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration
+// +k8s:openapi-gen=true
+// +groupName=apiregistration.k8s.io
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:prerelease-lifecycle-gen=true
+
+// Package v1 contains the API Registration API, which is responsible for
+// registering an API `Group`/`Version` with another kubernetes like API server.
+// The `APIService` holds information about the other API server in
+// `APIServiceSpec` type as well as general `TypeMeta` and `ObjectMeta`. The
+// `APIServiceSpec` type have the main configuration needed to do the
+// aggregation. Any request coming for specified `Group`/`Version` will be
+// directed to the service defined by `ServiceReference` (on port 443) after
+// validating the target using provided `CABundle` or skipping validation
+// if development flag `InsecureSkipTLSVerify` is set. `Priority` is controlling
+// the order of this API group in the overall discovery document.
+// The return status is a set of conditions for this aggregation. Currently
+// there is only one condition named "Available", if true, it means the
+// api/server requests will be redirected to specified API server.
+package v1
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
new file mode 100644
index 0000000000..690810e8bb
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
@@ -0,0 +1,1813 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *APIService) Reset() { *m = APIService{} }
+func (*APIService) ProtoMessage() {}
+func (*APIService) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{0}
+}
+func (m *APIService) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIService) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIService.Merge(m, src)
+}
+func (m *APIService) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIService) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIService.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIService proto.InternalMessageInfo
+
+func (m *APIServiceCondition) Reset() { *m = APIServiceCondition{} }
+func (*APIServiceCondition) ProtoMessage() {}
+func (*APIServiceCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{1}
+}
+func (m *APIServiceCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceCondition.Merge(m, src)
+}
+func (m *APIServiceCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceCondition proto.InternalMessageInfo
+
+func (m *APIServiceList) Reset() { *m = APIServiceList{} }
+func (*APIServiceList) ProtoMessage() {}
+func (*APIServiceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{2}
+}
+func (m *APIServiceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceList.Merge(m, src)
+}
+func (m *APIServiceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceList proto.InternalMessageInfo
+
+func (m *APIServiceSpec) Reset() { *m = APIServiceSpec{} }
+func (*APIServiceSpec) ProtoMessage() {}
+func (*APIServiceSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{3}
+}
+func (m *APIServiceSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceSpec.Merge(m, src)
+}
+func (m *APIServiceSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceSpec proto.InternalMessageInfo
+
+func (m *APIServiceStatus) Reset() { *m = APIServiceStatus{} }
+func (*APIServiceStatus) ProtoMessage() {}
+func (*APIServiceStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{4}
+}
+func (m *APIServiceStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceStatus.Merge(m, src)
+}
+func (m *APIServiceStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceStatus proto.InternalMessageInfo
+
+func (m *ServiceReference) Reset() { *m = ServiceReference{} }
+func (*ServiceReference) ProtoMessage() {}
+func (*ServiceReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_93cf925561aed99f, []int{5}
+}
+func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceReference.Merge(m, src)
+}
+func (m *ServiceReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*APIService)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIService")
+ proto.RegisterType((*APIServiceCondition)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceCondition")
+ proto.RegisterType((*APIServiceList)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceList")
+ proto.RegisterType((*APIServiceSpec)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceSpec")
+ proto.RegisterType((*APIServiceStatus)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceStatus")
+ proto.RegisterType((*ServiceReference)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.ServiceReference")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto", fileDescriptor_93cf925561aed99f)
+}
+
+var fileDescriptor_93cf925561aed99f = []byte{
+ // 826 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6b, 0x2b, 0x45,
+ 0x18, 0xce, 0xb6, 0x49, 0x9b, 0x4e, 0xeb, 0x69, 0x1d, 0xcf, 0xe1, 0x2c, 0xe5, 0xb8, 0xad, 0x11,
+ 0x34, 0x0a, 0x67, 0xd7, 0x06, 0x11, 0x45, 0x10, 0xba, 0x47, 0x28, 0x85, 0x56, 0xc3, 0xa4, 0x14,
+ 0x11, 0x41, 0x27, 0x9b, 0xb7, 0xdb, 0x31, 0xdd, 0x0f, 0x66, 0x66, 0x03, 0xc1, 0x1b, 0xc1, 0x1f,
+ 0xa0, 0xbf, 0xc9, 0xab, 0x5e, 0x1e, 0xf0, 0xa6, 0x57, 0xc1, 0xc4, 0x7f, 0x71, 0xae, 0x64, 0x66,
+ 0x67, 0x77, 0xd3, 0x34, 0xe2, 0xe9, 0xe9, 0x4d, 0xc8, 0xfb, 0xf1, 0x3c, 0xcf, 0x3b, 0xef, 0x3c,
+ 0x99, 0x20, 0x7f, 0xf8, 0xb9, 0x70, 0x59, 0xe2, 0x0d, 0xb3, 0x3e, 0x3c, 0xa7, 0x61, 0xc8, 0x21,
+ 0xa4, 0x32, 0xe1, 0x5e, 0x3a, 0x0c, 0x3d, 0x9a, 0x32, 0xa1, 0x3e, 0x38, 0x84, 0x4c, 0x48, 0x4e,
+ 0x25, 0x4b, 0x62, 0x6f, 0x74, 0xe0, 0x85, 0x10, 0x03, 0xa7, 0x12, 0x06, 0x6e, 0xca, 0x13, 0x99,
+ 0xe0, 0x4e, 0xce, 0xe1, 0x2a, 0x8e, 0x1f, 0x2b, 0x0e, 0x37, 0x1d, 0x86, 0xae, 0xe2, 0x70, 0x17,
+ 0x38, 0xdc, 0xd1, 0xc1, 0xee, 0xf3, 0x90, 0xc9, 0xcb, 0xac, 0xef, 0x06, 0x49, 0xe4, 0x85, 0x49,
+ 0x98, 0x78, 0x9a, 0xaa, 0x9f, 0x5d, 0xe8, 0x48, 0x07, 0xfa, 0x5b, 0x2e, 0xb1, 0xfb, 0xa9, 0x19,
+ 0x93, 0xa6, 0x2c, 0xa2, 0xc1, 0x25, 0x8b, 0x81, 0x8f, 0xab, 0x19, 0x23, 0x90, 0x74, 0xc9, 0x60,
+ 0xbb, 0xde, 0x7f, 0xa1, 0x78, 0x16, 0x4b, 0x16, 0xc1, 0x1d, 0xc0, 0x67, 0xff, 0x07, 0x10, 0xc1,
+ 0x25, 0x44, 0x74, 0x11, 0xd7, 0xfa, 0x73, 0x05, 0xa1, 0xc3, 0xee, 0x71, 0x0f, 0xf8, 0x88, 0x05,
+ 0x80, 0x7f, 0x42, 0x4d, 0x35, 0xd2, 0x80, 0x4a, 0x6a, 0x5b, 0xfb, 0x56, 0x7b, 0xb3, 0xf3, 0x89,
+ 0x6b, 0x76, 0x34, 0xcf, 0x5c, 0x2d, 0x48, 0x75, 0xbb, 0xa3, 0x03, 0xf7, 0xdb, 0xfe, 0xcf, 0x10,
+ 0xc8, 0x53, 0x90, 0xd4, 0xc7, 0xd7, 0x93, 0xbd, 0xda, 0x6c, 0xb2, 0x87, 0xaa, 0x1c, 0x29, 0x59,
+ 0xf1, 0x00, 0xd5, 0x45, 0x0a, 0x81, 0xbd, 0xa2, 0xd9, 0x7d, 0xf7, 0xfe, 0x37, 0xe0, 0x56, 0xf3,
+ 0xf6, 0x52, 0x08, 0xfc, 0x2d, 0xa3, 0x57, 0x57, 0x11, 0xd1, 0xec, 0xf8, 0x0a, 0xad, 0x09, 0x49,
+ 0x65, 0x26, 0xec, 0x55, 0xad, 0xf3, 0xf5, 0x03, 0x75, 0x34, 0x97, 0xff, 0xc8, 0x28, 0xad, 0xe5,
+ 0x31, 0x31, 0x1a, 0xad, 0x9b, 0x15, 0xf4, 0x4e, 0xd5, 0xfc, 0x22, 0x89, 0x07, 0x4c, 0x71, 0xe0,
+ 0x2f, 0x51, 0x5d, 0x8e, 0x53, 0xd0, 0x9b, 0xdc, 0xf0, 0x3f, 0x2c, 0xe6, 0x3c, 0x1b, 0xa7, 0xf0,
+ 0x6a, 0xb2, 0xf7, 0x74, 0x09, 0x44, 0x95, 0x88, 0x06, 0xe1, 0x2f, 0xca, 0x23, 0xac, 0x68, 0xf8,
+ 0x7b, 0xb7, 0xc5, 0x5f, 0x4d, 0xf6, 0xb6, 0x4b, 0xd8, 0xed, 0x79, 0xf0, 0x08, 0xe1, 0x2b, 0x2a,
+ 0xe4, 0x19, 0xa7, 0xb1, 0xc8, 0x69, 0x59, 0x04, 0x66, 0x13, 0x1f, 0xbf, 0xde, 0x7d, 0x2a, 0x84,
+ 0xbf, 0x6b, 0x24, 0xf1, 0xc9, 0x1d, 0x36, 0xb2, 0x44, 0x01, 0x7f, 0x80, 0xd6, 0x38, 0x50, 0x91,
+ 0xc4, 0x76, 0x5d, 0x8f, 0x5c, 0xee, 0x8b, 0xe8, 0x2c, 0x31, 0x55, 0xfc, 0x11, 0x5a, 0x8f, 0x40,
+ 0x08, 0x1a, 0x82, 0xdd, 0xd0, 0x8d, 0xdb, 0xa6, 0x71, 0xfd, 0x34, 0x4f, 0x93, 0xa2, 0xde, 0xfa,
+ 0xcb, 0x42, 0x8f, 0xaa, 0x3d, 0x9d, 0x30, 0x21, 0xf1, 0x0f, 0x77, 0x3c, 0xea, 0xbe, 0xde, 0x99,
+ 0x14, 0x5a, 0x3b, 0x74, 0xc7, 0xc8, 0x35, 0x8b, 0xcc, 0x9c, 0x3f, 0x03, 0xd4, 0x60, 0x12, 0x22,
+ 0xb5, 0xf5, 0xd5, 0xf6, 0x66, 0xe7, 0xab, 0x87, 0x19, 0xc7, 0x7f, 0xcb, 0x48, 0x35, 0x8e, 0x15,
+ 0x29, 0xc9, 0xb9, 0x5b, 0xd3, 0xd5, 0xf9, 0x53, 0x29, 0xdf, 0xe2, 0x21, 0x5a, 0x17, 0x79, 0x68,
+ 0x0e, 0xf5, 0x46, 0x96, 0x35, 0x8c, 0x04, 0x2e, 0x80, 0x43, 0x1c, 0x80, 0xbf, 0xa9, 0xb6, 0x5a,
+ 0x64, 0x0b, 0x05, 0xfc, 0x3e, 0x6a, 0x84, 0x3c, 0xc9, 0x52, 0x63, 0xad, 0x72, 0xc8, 0x23, 0x95,
+ 0x24, 0x79, 0x4d, 0xdd, 0xd2, 0x08, 0xb8, 0x60, 0x49, 0xac, 0xad, 0x33, 0x77, 0x4b, 0xe7, 0x79,
+ 0x9a, 0x14, 0x75, 0xdc, 0x43, 0x4f, 0x58, 0x2c, 0x20, 0xc8, 0x38, 0xf4, 0x86, 0x2c, 0x3d, 0x3b,
+ 0xe9, 0x9d, 0x03, 0x67, 0x17, 0x63, 0xed, 0x83, 0xa6, 0xff, 0xae, 0x01, 0x3e, 0x39, 0x5e, 0xd6,
+ 0x44, 0x96, 0x63, 0x71, 0x1b, 0x35, 0x03, 0xea, 0x67, 0xf1, 0xe0, 0x2a, 0xb7, 0xc9, 0x96, 0xbf,
+ 0xa5, 0xee, 0xec, 0xc5, 0x61, 0x9e, 0x23, 0x65, 0x15, 0x77, 0xd1, 0x63, 0x3d, 0x72, 0x97, 0xb3,
+ 0x84, 0x33, 0x39, 0x3e, 0x65, 0x31, 0x8b, 0xb2, 0xc8, 0x5e, 0xdf, 0xb7, 0xda, 0x0d, 0xff, 0x99,
+ 0x51, 0x7f, 0x7c, 0xb4, 0xa4, 0x87, 0x2c, 0x45, 0xe2, 0x43, 0xb4, 0x6d, 0xce, 0x56, 0x54, 0xec,
+ 0xa6, 0x26, 0x7b, 0x6a, 0xc8, 0xb6, 0xcf, 0x6f, 0x97, 0xc9, 0x62, 0x7f, 0xeb, 0x77, 0x0b, 0xed,
+ 0x2c, 0xbe, 0x20, 0xf8, 0x17, 0x84, 0x82, 0xe2, 0x47, 0x2b, 0x6c, 0x4b, 0x5b, 0xec, 0xe8, 0x61,
+ 0x16, 0x2b, 0x1f, 0x81, 0xea, 0xe1, 0x2d, 0x53, 0x82, 0xcc, 0xc9, 0xb5, 0x7e, 0xb3, 0xd0, 0xce,
+ 0xa2, 0x41, 0xb0, 0x87, 0x36, 0x62, 0x1a, 0x81, 0x48, 0x69, 0x50, 0x3c, 0x54, 0x6f, 0x1b, 0x9e,
+ 0x8d, 0x6f, 0x8a, 0x02, 0xa9, 0x7a, 0xf0, 0x3e, 0xaa, 0xab, 0xc0, 0x58, 0xa7, 0x7c, 0x7c, 0x55,
+ 0x2f, 0xd1, 0x15, 0xfc, 0x0c, 0xd5, 0xd3, 0x84, 0x4b, 0xed, 0x9a, 0x86, 0xdf, 0x54, 0xd5, 0x6e,
+ 0xc2, 0x25, 0xd1, 0x59, 0xff, 0xbb, 0xeb, 0xa9, 0x53, 0x7b, 0x39, 0x75, 0x6a, 0x37, 0x53, 0xa7,
+ 0xf6, 0xeb, 0xcc, 0xb1, 0xae, 0x67, 0x8e, 0xf5, 0x72, 0xe6, 0x58, 0x37, 0x33, 0xc7, 0xfa, 0x7b,
+ 0xe6, 0x58, 0x7f, 0xfc, 0xe3, 0xd4, 0xbe, 0xef, 0xdc, 0xff, 0xdf, 0xfd, 0xdf, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x19, 0x6e, 0x3d, 0x66, 0x12, 0x08, 0x00, 0x00,
+}
+
+func (m *APIService) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIService) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIService) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.VersionPriority))
+ i--
+ dAtA[i] = 0x40
+ i = encodeVarintGenerated(dAtA, i, uint64(m.GroupPriorityMinimum))
+ i--
+ dAtA[i] = 0x38
+ if m.CABundle != nil {
+ i -= len(m.CABundle)
+ copy(dAtA[i:], m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ i--
+ if m.InsecureSkipTLSVerify {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0x12
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *APIService) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *APIServiceCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *APIServiceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *APIServiceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.CABundle != nil {
+ l = len(m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.GroupPriorityMinimum))
+ n += 1 + sovGenerated(uint64(m.VersionPriority))
+ return n
+}
+
+func (m *APIServiceStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *APIService) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIService{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "APIServiceSpec", "APIServiceSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "APIServiceStatus", "APIServiceStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIServiceCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]APIService{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIService", "APIService", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&APIServiceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIServiceSpec{`,
+ `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `InsecureSkipTLSVerify:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerify) + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `GroupPriorityMinimum:` + fmt.Sprintf("%v", this.GroupPriorityMinimum) + `,`,
+ `VersionPriority:` + fmt.Sprintf("%v", this.VersionPriority) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]APIServiceCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "APIServiceCondition", "APIServiceCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&APIServiceStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *APIService) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIService: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIService: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = APIServiceConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, APIService{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &ServiceReference{}
+ }
+ if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerify", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.InsecureSkipTLSVerify = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...)
+ if m.CABundle == nil {
+ m.CABundle = []byte{}
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupPriorityMinimum", wireType)
+ }
+ m.GroupPriorityMinimum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.GroupPriorityMinimum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionPriority", wireType)
+ }
+ m.VersionPriority = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.VersionPriority |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, APIServiceCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Port = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
new file mode 100644
index 0000000000..5571387ef8
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
@@ -0,0 +1,151 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.kube_aggregator.pkg.apis.apiregistration.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1";
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+message APIService {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains information for locating and communicating with a server
+ optional APIServiceSpec spec = 2;
+
+ // Status contains derived information about an API server
+ optional APIServiceStatus status = 3;
+}
+
+// APIServiceCondition describes the state of an APIService at a particular point
+message APIServiceCondition {
+ // Type is the type of the condition.
+ optional string type = 1;
+
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // Human-readable message indicating details about last transition.
+ // +optional
+ optional string message = 5;
+}
+
+// APIServiceList is a list of APIService objects.
+message APIServiceList {
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of APIService
+ repeated APIService items = 2;
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+message APIServiceSpec {
+ // Service is a reference to the service for this API server. It must communicate
+ // on port 443.
+ // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+ // The call will simply delegate to the normal handler chain to be fulfilled.
+ // +optional
+ optional ServiceReference service = 1;
+
+ // Group is the API group name this server hosts
+ optional string group = 2;
+
+ // Version is the API version this server hosts. For example, "v1"
+ optional string version = 3;
+
+ // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+ // This is strongly discouraged. You should use the CABundle instead.
+ optional bool insecureSkipTLSVerify = 4;
+
+ // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ // +listType=atomic
+ // +optional
+ optional bytes caBundle = 5;
+
+ // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+ // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+ // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+ // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+ // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+ // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+ optional int32 groupPriorityMinimum = 7;
+
+ // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+ // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+ // Since it's inside of a group, the number can be small, probably in the 10s.
+ // In case of equal version priorities, the version string will be used to compute the order inside a group.
+ // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+ // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+ // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+ // version, then minor version. An example sorted list of versions:
+ // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+ optional int32 versionPriority = 8;
+}
+
+// APIServiceStatus contains derived information about an API server
+message APIServiceStatus {
+ // Current service state of apiService.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ repeated APIServiceCondition conditions = 1;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+ // Namespace is the namespace of the service
+ optional string namespace = 1;
+
+ // Name is the name of the service
+ optional string name = 2;
+
+ // If specified, the port on the service that hosting webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ // +optional
+ optional int32 port = 3;
+}
+
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go
new file mode 100644
index 0000000000..07e65bf045
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the API group for apiregistration
+const GroupName = "apiregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &APIService{},
+ &APIServiceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go
new file mode 100644
index 0000000000..fe5f64c0e1
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.10
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of APIService
+ Items []APIService `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+ // Namespace is the namespace of the service
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // Name is the name of the service
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+ // If specified, the port on the service that hosting webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ // +optional
+ Port *int32 `json:"port,omitempty" protobuf:"varint,3,opt,name=port"`
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+ // Service is a reference to the service for this API server. It must communicate
+ // on port 443.
+ // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+ // The call will simply delegate to the normal handler chain to be fulfilled.
+ // +optional
+ Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+ // Group is the API group name this server hosts
+ Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+ // Version is the API version this server hosts. For example, "v1"
+ Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+
+ // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+ // This is strongly discouraged. You should use the CABundle instead.
+ InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty" protobuf:"varint,4,opt,name=insecureSkipTLSVerify"`
+ // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ // +listType=atomic
+ // +optional
+ CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"`
+
+ // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+ // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+ // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+ // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+ // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+ // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+ GroupPriorityMinimum int32 `json:"groupPriorityMinimum" protobuf:"varint,7,opt,name=groupPriorityMinimum"`
+
+ // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+ // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+ // Since it's inside of a group, the number can be small, probably in the 10s.
+ // In case of equal version priorities, the version string will be used to compute the order inside a group.
+ // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+ // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+ // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+ // version, then minor version. An example sorted list of versions:
+ // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+ VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"`
+
+ // leaving this here so everyone remembers why proto index 6 is skipped
+ // Priority int64 `json:"priority" protobuf:"varint,6,opt,name=priority"`
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+ // Available indicates that the service exists and is reachable
+ Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes the state of an APIService at a particular point
+type APIServiceCondition struct {
+ // Type is the type of the condition.
+ Type APIServiceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=APIServiceConditionType"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+ // Current service state of apiService.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []APIServiceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.10
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+type APIService struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec contains information for locating and communicating with a server
+ Spec APIServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains derived information about an API server
+ Status APIServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go
new file mode 100644
index 0000000000..208e23efd8
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go
@@ -0,0 +1,299 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ unsafe "unsafe"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*APIService)(nil), (*apiregistration.APIService)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_APIService_To_apiregistration_APIService(a.(*APIService), b.(*apiregistration.APIService), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIService)(nil), (*APIService)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIService_To_v1_APIService(a.(*apiregistration.APIService), b.(*APIService), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceCondition)(nil), (*apiregistration.APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(a.(*APIServiceCondition), b.(*apiregistration.APIServiceCondition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceCondition)(nil), (*APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(a.(*apiregistration.APIServiceCondition), b.(*APIServiceCondition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceList)(nil), (*apiregistration.APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_APIServiceList_To_apiregistration_APIServiceList(a.(*APIServiceList), b.(*apiregistration.APIServiceList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceList)(nil), (*APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceList_To_v1_APIServiceList(a.(*apiregistration.APIServiceList), b.(*APIServiceList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceSpec)(nil), (*apiregistration.APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(a.(*APIServiceSpec), b.(*apiregistration.APIServiceSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceSpec)(nil), (*APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(a.(*apiregistration.APIServiceSpec), b.(*APIServiceSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceStatus)(nil), (*apiregistration.APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(a.(*APIServiceStatus), b.(*apiregistration.APIServiceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceStatus)(nil), (*APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(a.(*apiregistration.APIServiceStatus), b.(*APIServiceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_ServiceReference_To_apiregistration_ServiceReference(a.(*ServiceReference), b.(*apiregistration.ServiceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_ServiceReference_To_v1_ServiceReference(a.(*apiregistration.ServiceReference), b.(*ServiceReference), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1_APIService_To_apiregistration_APIService is an autogenerated conversion function.
+func Convert_v1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error {
+ return autoConvert_v1_APIService_To_apiregistration_APIService(in, out, s)
+}
+
+func autoConvert_apiregistration_APIService_To_v1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_apiregistration_APIService_To_v1_APIService is an autogenerated conversion function.
+func Convert_apiregistration_APIService_To_v1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIService_To_v1_APIService(in, out, s)
+}
+
+func autoConvert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error {
+ out.Type = apiregistration.APIServiceConditionType(in.Type)
+ out.Status = apiregistration.ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+// Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition is an autogenerated conversion function.
+func Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error {
+ return autoConvert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error {
+ out.Type = APIServiceConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+// Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in, out, s)
+}
+
+func autoConvert_v1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]apiregistration.APIService, len(*in))
+ for i := range *in {
+ if err := Convert_v1_APIService_To_apiregistration_APIService(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1_APIServiceList_To_apiregistration_APIServiceList is an autogenerated conversion function.
+func Convert_v1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error {
+ return autoConvert_v1_APIServiceList_To_apiregistration_APIServiceList(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceList_To_v1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIService, len(*in))
+ for i := range *in {
+ if err := Convert_apiregistration_APIService_To_v1_APIService(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_apiregistration_APIServiceList_To_v1_APIServiceList is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceList_To_v1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceList_To_v1_APIServiceList(in, out, s)
+}
+
+func autoConvert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error {
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(apiregistration.ServiceReference)
+ if err := Convert_v1_ServiceReference_To_apiregistration_ServiceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Service = nil
+ }
+ out.Group = in.Group
+ out.Version = in.Version
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+ out.GroupPriorityMinimum = in.GroupPriorityMinimum
+ out.VersionPriority = in.VersionPriority
+ return nil
+}
+
+// Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec is an autogenerated conversion function.
+func Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error {
+ return autoConvert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error {
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(ServiceReference)
+ if err := Convert_apiregistration_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Service = nil
+ }
+ out.Group = in.Group
+ out.Version = in.Version
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+ out.GroupPriorityMinimum = in.GroupPriorityMinimum
+ out.VersionPriority = in.VersionPriority
+ return nil
+}
+
+// Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in, out, s)
+}
+
+func autoConvert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]apiregistration.APIServiceCondition)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus is an autogenerated conversion function.
+func Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error {
+ return autoConvert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]APIServiceCondition)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in, out, s)
+}
+
+func autoConvert_v1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1_ServiceReference_To_apiregistration_ServiceReference is an autogenerated conversion function.
+func Convert_v1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error {
+ return autoConvert_v1_ServiceReference_To_apiregistration_ServiceReference(in, out, s)
+}
+
+func autoConvert_apiregistration_ServiceReference_To_v1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_apiregistration_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function.
+func Convert_apiregistration_ServiceReference_To_v1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+ return autoConvert_apiregistration_ServiceReference_To_v1_ServiceReference(in, out, s)
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..6388772450
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go
@@ -0,0 +1,174 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIService) DeepCopyInto(out *APIService) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService.
+func (in *APIService) DeepCopy() *APIService {
+ if in == nil {
+ return nil
+ }
+ out := new(APIService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIService) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition.
+func (in *APIServiceCondition) DeepCopy() *APIServiceCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceList) DeepCopyInto(out *APIServiceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIService, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList.
+func (in *APIServiceList) DeepCopy() *APIServiceList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServiceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) {
+ *out = *in
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(ServiceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec.
+func (in *APIServiceSpec) DeepCopy() *APIServiceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]APIServiceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus.
+func (in *APIServiceStatus) DeepCopy() *APIServiceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+ *out = *in
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..175637ca5a
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go
@@ -0,0 +1,48 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&APIService{}, func(obj interface{}) { SetObjectDefaults_APIService(obj.(*APIService)) })
+ scheme.AddTypeDefaultingFunc(&APIServiceList{}, func(obj interface{}) { SetObjectDefaults_APIServiceList(obj.(*APIServiceList)) })
+ return nil
+}
+
+func SetObjectDefaults_APIService(in *APIService) {
+ if in.Spec.Service != nil {
+ SetDefaults_ServiceReference(in.Spec.Service)
+ }
+}
+
+func SetObjectDefaults_APIServiceList(in *APIServiceList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_APIService(a)
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 0000000000..14d3e1f48d
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,34 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *APIService) APILifecycleIntroduced() (major, minor int) {
+ return 1, 10
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *APIServiceList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 10
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go
new file mode 100644
index 0000000000..d9c5af94e6
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/utils/ptr"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+// SetDefaults_ServiceReference sets defaults for AuditSync Webhook's ServiceReference
+func SetDefaults_ServiceReference(obj *ServiceReference) {
+ if obj.Port == nil {
+ obj.Port = ptr.To[int32](443)
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go
new file mode 100644
index 0000000000..40d1131ca0
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration
+// +k8s:openapi-gen=true
+// +groupName=apiregistration.k8s.io
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:prerelease-lifecycle-gen=true
+
+// Package v1beta1 contains the API Registration API, which is responsible for
+// registering an API `Group`/`Version` with another kubernetes like API server.
+// The `APIService` holds information about the other API server in
+// `APIServiceSpec` type as well as general `TypeMeta` and `ObjectMeta`. The
+// `APIServiceSpec` type have the main configuration needed to do the
+// aggregation. Any request coming for specified `Group`/`Version` will be
+// directed to the service defined by `ServiceReference` (on port 443) after
+// validating the target using provided `CABundle` or skipping validation
+// if development flag `InsecureSkipTLSVerify` is set. `Priority` is controlling
+// the order of this API group in the overall discovery document.
+// The return status is a set of conditions for this aggregation. Currently
+// there is only one condition named "Available", if true, it means the
+// api/server requests will be redirected to specified API server.
+package v1beta1
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
new file mode 100644
index 0000000000..8f1a4c5ff4
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
@@ -0,0 +1,1814 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *APIService) Reset() { *m = APIService{} }
+func (*APIService) ProtoMessage() {}
+func (*APIService) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{0}
+}
+func (m *APIService) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIService) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIService.Merge(m, src)
+}
+func (m *APIService) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIService) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIService.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIService proto.InternalMessageInfo
+
+func (m *APIServiceCondition) Reset() { *m = APIServiceCondition{} }
+func (*APIServiceCondition) ProtoMessage() {}
+func (*APIServiceCondition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{1}
+}
+func (m *APIServiceCondition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceCondition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceCondition.Merge(m, src)
+}
+func (m *APIServiceCondition) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceCondition) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceCondition proto.InternalMessageInfo
+
+func (m *APIServiceList) Reset() { *m = APIServiceList{} }
+func (*APIServiceList) ProtoMessage() {}
+func (*APIServiceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{2}
+}
+func (m *APIServiceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceList.Merge(m, src)
+}
+func (m *APIServiceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceList proto.InternalMessageInfo
+
+func (m *APIServiceSpec) Reset() { *m = APIServiceSpec{} }
+func (*APIServiceSpec) ProtoMessage() {}
+func (*APIServiceSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{3}
+}
+func (m *APIServiceSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceSpec.Merge(m, src)
+}
+func (m *APIServiceSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceSpec proto.InternalMessageInfo
+
+func (m *APIServiceStatus) Reset() { *m = APIServiceStatus{} }
+func (*APIServiceStatus) ProtoMessage() {}
+func (*APIServiceStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{4}
+}
+func (m *APIServiceStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *APIServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *APIServiceStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_APIServiceStatus.Merge(m, src)
+}
+func (m *APIServiceStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *APIServiceStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_APIServiceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIServiceStatus proto.InternalMessageInfo
+
+func (m *ServiceReference) Reset() { *m = ServiceReference{} }
+func (*ServiceReference) ProtoMessage() {}
+func (*ServiceReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6acc79c5d169026d, []int{5}
+}
+func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceReference.Merge(m, src)
+}
+func (m *ServiceReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*APIService)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIService")
+ proto.RegisterType((*APIServiceCondition)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceCondition")
+ proto.RegisterType((*APIServiceList)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceList")
+ proto.RegisterType((*APIServiceSpec)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceSpec")
+ proto.RegisterType((*APIServiceStatus)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceStatus")
+ proto.RegisterType((*ServiceReference)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.ServiceReference")
+}
+
+func init() {
+ proto.RegisterFile("k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto", fileDescriptor_6acc79c5d169026d)
+}
+
+var fileDescriptor_6acc79c5d169026d = []byte{
+ // 833 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0xe3, 0x44,
+ 0x18, 0x8e, 0xdb, 0xa4, 0x4d, 0xa7, 0x65, 0x5b, 0x86, 0x5d, 0xad, 0x55, 0x2d, 0x6e, 0x09, 0x12,
+ 0x14, 0xa4, 0xb5, 0xe9, 0x0a, 0xb1, 0x20, 0x4e, 0x75, 0x0f, 0x55, 0xa5, 0x16, 0xa2, 0x49, 0xd5,
+ 0x03, 0x02, 0xc1, 0xc4, 0x79, 0xeb, 0x0c, 0x59, 0x7f, 0x30, 0x33, 0x8e, 0x94, 0xdb, 0x4a, 0xfc,
+ 0x01, 0x2e, 0xfc, 0xa7, 0x1e, 0x38, 0xec, 0x31, 0xa7, 0x88, 0x06, 0x89, 0x1f, 0xb1, 0x27, 0x34,
+ 0xe3, 0xb1, 0x9d, 0x26, 0x41, 0x5b, 0x55, 0xbd, 0x44, 0x79, 0x3f, 0x9e, 0xe7, 0x79, 0xe7, 0x9d,
+ 0x27, 0xa3, 0xa0, 0x93, 0xc1, 0xd7, 0xc2, 0x65, 0x89, 0x37, 0xc8, 0xba, 0xf0, 0x9c, 0x86, 0x21,
+ 0x87, 0x90, 0xca, 0x84, 0x7b, 0xe9, 0x20, 0xf4, 0x68, 0xca, 0x84, 0xfa, 0xe0, 0x10, 0x32, 0x21,
+ 0x39, 0x95, 0x2c, 0x89, 0xbd, 0xe1, 0x61, 0x17, 0x24, 0x3d, 0xf4, 0x42, 0x88, 0x81, 0x53, 0x09,
+ 0x3d, 0x37, 0xe5, 0x89, 0x4c, 0xf0, 0xcb, 0x9c, 0xc8, 0x55, 0x44, 0x3f, 0x57, 0x44, 0x6e, 0x3a,
+ 0x08, 0x5d, 0x45, 0xe4, 0xce, 0x11, 0xb9, 0x86, 0x68, 0xf7, 0x79, 0xc8, 0x64, 0x3f, 0xeb, 0xba,
+ 0x41, 0x12, 0x79, 0x61, 0x12, 0x26, 0x9e, 0xe6, 0xeb, 0x66, 0x57, 0x3a, 0xd2, 0x81, 0xfe, 0x96,
+ 0xeb, 0xec, 0x7e, 0x69, 0x06, 0xa6, 0x29, 0x8b, 0x68, 0xd0, 0x67, 0x31, 0xf0, 0x51, 0x35, 0x6d,
+ 0x04, 0x92, 0x7a, 0xc3, 0x85, 0xe9, 0x76, 0xbd, 0xff, 0x43, 0xf1, 0x2c, 0x96, 0x2c, 0x82, 0x05,
+ 0xc0, 0x57, 0xef, 0x02, 0x88, 0xa0, 0x0f, 0x11, 0x9d, 0xc7, 0xb5, 0xfe, 0x5a, 0x41, 0xe8, 0xa8,
+ 0x7d, 0xda, 0x01, 0x3e, 0x64, 0x01, 0xe0, 0x5f, 0x50, 0x53, 0x8d, 0xd4, 0xa3, 0x92, 0xda, 0xd6,
+ 0xbe, 0x75, 0xb0, 0xf9, 0xe2, 0x0b, 0xd7, 0x2c, 0x6a, 0x96, 0xb9, 0xda, 0x92, 0xea, 0x76, 0x87,
+ 0x87, 0xee, 0xf7, 0xdd, 0x5f, 0x21, 0x90, 0xe7, 0x20, 0xa9, 0x8f, 0xaf, 0x27, 0x7b, 0xb5, 0xe9,
+ 0x64, 0x0f, 0x55, 0x39, 0x52, 0xb2, 0x62, 0x86, 0xea, 0x22, 0x85, 0xc0, 0x5e, 0xd1, 0xec, 0x27,
+ 0xee, 0x3d, 0xaf, 0xc1, 0xad, 0x86, 0xee, 0xa4, 0x10, 0xf8, 0x5b, 0x46, 0xb4, 0xae, 0x22, 0xa2,
+ 0x25, 0xf0, 0x6f, 0x68, 0x4d, 0x48, 0x2a, 0x33, 0x61, 0xaf, 0x6a, 0xb1, 0xd3, 0x87, 0x10, 0xd3,
+ 0x84, 0xfe, 0x23, 0x23, 0xb7, 0x96, 0xc7, 0xc4, 0x08, 0xb5, 0xc6, 0x2b, 0xe8, 0x83, 0xaa, 0xf9,
+ 0x38, 0x89, 0x7b, 0x4c, 0x11, 0xe1, 0x6f, 0x51, 0x5d, 0x8e, 0x52, 0xd0, 0x3b, 0xdd, 0xf0, 0x3f,
+ 0x2d, 0x86, 0xbd, 0x18, 0xa5, 0xf0, 0x76, 0xb2, 0xf7, 0x74, 0x09, 0x44, 0x95, 0x88, 0x06, 0xe1,
+ 0x6f, 0xca, 0x73, 0xac, 0x68, 0xf8, 0x47, 0xb7, 0xc5, 0xdf, 0x4e, 0xf6, 0xb6, 0x4b, 0xd8, 0xed,
+ 0x79, 0xf0, 0x10, 0xe1, 0x57, 0x54, 0xc8, 0x0b, 0x4e, 0x63, 0x91, 0xd3, 0xb2, 0x08, 0xcc, 0x3a,
+ 0x3e, 0xbf, 0xdb, 0xcd, 0x2a, 0x84, 0xbf, 0x6b, 0x24, 0xf1, 0xd9, 0x02, 0x1b, 0x59, 0xa2, 0x80,
+ 0x3f, 0x41, 0x6b, 0x1c, 0xa8, 0x48, 0x62, 0xbb, 0xae, 0x47, 0x2e, 0xf7, 0x45, 0x74, 0x96, 0x98,
+ 0x2a, 0xfe, 0x0c, 0xad, 0x47, 0x20, 0x04, 0x0d, 0xc1, 0x6e, 0xe8, 0xc6, 0x6d, 0xd3, 0xb8, 0x7e,
+ 0x9e, 0xa7, 0x49, 0x51, 0x6f, 0x8d, 0x2d, 0xf4, 0xa8, 0xda, 0xd3, 0x19, 0x13, 0x12, 0xff, 0xb8,
+ 0xe0, 0x56, 0xf7, 0x6e, 0x67, 0x52, 0x68, 0xed, 0xd5, 0x1d, 0x23, 0xd7, 0x2c, 0x32, 0x33, 0x4e,
+ 0xed, 0xa3, 0x06, 0x93, 0x10, 0xa9, 0xad, 0xaf, 0x1e, 0x6c, 0xbe, 0x38, 0x7e, 0x00, 0xf7, 0xf8,
+ 0xef, 0x19, 0xbd, 0xc6, 0xa9, 0x62, 0x26, 0xb9, 0x40, 0xeb, 0xdf, 0xd5, 0xd9, 0xa3, 0x29, 0x07,
+ 0xe3, 0x14, 0xad, 0x8b, 0x3c, 0x34, 0x27, 0xbb, 0xbf, 0x79, 0x0d, 0x2d, 0x81, 0x2b, 0xe0, 0x10,
+ 0x07, 0xe0, 0x6f, 0xaa, 0xfd, 0x16, 0xd9, 0x42, 0x06, 0x7f, 0x8c, 0x1a, 0x21, 0x4f, 0xb2, 0xd4,
+ 0x98, 0xac, 0x9c, 0xf4, 0x44, 0x25, 0x49, 0x5e, 0x53, 0xf7, 0x35, 0x04, 0x2e, 0x58, 0x12, 0x6b,
+ 0x13, 0xcd, 0xdc, 0xd7, 0x65, 0x9e, 0x26, 0x45, 0x1d, 0x77, 0xd0, 0x13, 0x16, 0x0b, 0x08, 0x32,
+ 0x0e, 0x9d, 0x01, 0x4b, 0x2f, 0xce, 0x3a, 0x97, 0xc0, 0xd9, 0xd5, 0x48, 0x3b, 0xa2, 0xe9, 0x7f,
+ 0x68, 0x80, 0x4f, 0x4e, 0x97, 0x35, 0x91, 0xe5, 0x58, 0x7c, 0x80, 0x9a, 0x01, 0xf5, 0xb3, 0xb8,
+ 0xf7, 0x2a, 0x37, 0xcc, 0x96, 0xbf, 0xa5, 0x6e, 0xef, 0xf8, 0x28, 0xcf, 0x91, 0xb2, 0x8a, 0xdb,
+ 0xe8, 0xb1, 0x1e, 0xb9, 0xcd, 0x59, 0xc2, 0x99, 0x1c, 0x9d, 0xb3, 0x98, 0x45, 0x59, 0x64, 0xaf,
+ 0xef, 0x5b, 0x07, 0x0d, 0xff, 0x99, 0x51, 0x7f, 0x7c, 0xb2, 0xa4, 0x87, 0x2c, 0x45, 0xe2, 0x23,
+ 0xb4, 0x6d, 0xce, 0x56, 0x54, 0xec, 0xa6, 0x26, 0x7b, 0x6a, 0xc8, 0xb6, 0x2f, 0x6f, 0x97, 0xc9,
+ 0x7c, 0x7f, 0xeb, 0x4f, 0x0b, 0xed, 0xcc, 0xbf, 0x25, 0xf8, 0xb5, 0x85, 0x50, 0x50, 0xfc, 0x7e,
+ 0x85, 0x6d, 0x69, 0xb7, 0x9d, 0x3d, 0x80, 0xdb, 0xca, 0x47, 0xa1, 0x7a, 0x92, 0xcb, 0x94, 0x20,
+ 0x33, 0x9a, 0xad, 0xdf, 0x2d, 0xb4, 0x33, 0x6f, 0x13, 0xec, 0xa1, 0x8d, 0x98, 0x46, 0x20, 0x52,
+ 0x1a, 0x14, 0x0f, 0xd7, 0xfb, 0x86, 0x67, 0xe3, 0xbb, 0xa2, 0x40, 0xaa, 0x1e, 0xbc, 0x8f, 0xea,
+ 0x2a, 0x30, 0x06, 0x2a, 0x5f, 0x64, 0xd5, 0x4b, 0x74, 0x05, 0x3f, 0x43, 0xf5, 0x34, 0xe1, 0x52,
+ 0x7b, 0xa7, 0xe1, 0x37, 0x55, 0xb5, 0x9d, 0x70, 0x49, 0x74, 0xd6, 0xff, 0xe9, 0xfa, 0xc6, 0xa9,
+ 0xbd, 0xb9, 0x71, 0x6a, 0xe3, 0x1b, 0xa7, 0xf6, 0x7a, 0xea, 0x58, 0xd7, 0x53, 0xc7, 0x7a, 0x33,
+ 0x75, 0xac, 0xf1, 0xd4, 0xb1, 0xfe, 0x9e, 0x3a, 0xd6, 0x1f, 0xff, 0x38, 0xb5, 0x1f, 0x5e, 0xde,
+ 0xf3, 0x1f, 0xc0, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x50, 0xda, 0x9b, 0x3b, 0x08, 0x00,
+ 0x00,
+}
+
+func (m *APIService) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIService) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIService) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceCondition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceCondition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.Reason)
+ copy(dAtA[i:], m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ {
+ size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.VersionPriority))
+ i--
+ dAtA[i] = 0x40
+ i = encodeVarintGenerated(dAtA, i, uint64(m.GroupPriorityMinimum))
+ i--
+ dAtA[i] = 0x38
+ if m.CABundle != nil {
+ i -= len(m.CABundle)
+ copy(dAtA[i:], m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ i--
+ if m.InsecureSkipTLSVerify {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Group)
+ copy(dAtA[i:], m.Group)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+ i--
+ dAtA[i] = 0x12
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *APIServiceStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APIServiceStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+ i--
+ dAtA[i] = 0x18
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *APIService) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *APIServiceCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *APIServiceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *APIServiceSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Group)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Version)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 2
+ if m.CABundle != nil {
+ l = len(m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ n += 1 + sovGenerated(uint64(m.GroupPriorityMinimum))
+ n += 1 + sovGenerated(uint64(m.VersionPriority))
+ return n
+}
+
+func (m *APIServiceStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *APIService) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIService{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "APIServiceSpec", "APIServiceSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "APIServiceStatus", "APIServiceStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIServiceCondition{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+ `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+ `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]APIService{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIService", "APIService", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&APIServiceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&APIServiceSpec{`,
+ `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
+ `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+ `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+ `InsecureSkipTLSVerify:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerify) + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `GroupPriorityMinimum:` + fmt.Sprintf("%v", this.GroupPriorityMinimum) + `,`,
+ `VersionPriority:` + fmt.Sprintf("%v", this.VersionPriority) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *APIServiceStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]APIServiceCondition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "APIServiceCondition", "APIServiceCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&APIServiceStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *APIService) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIService: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIService: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = APIServiceConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, APIService{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &ServiceReference{}
+ }
+ if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Group = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerify", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.InsecureSkipTLSVerify = bool(v != 0)
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...)
+ if m.CABundle == nil {
+ m.CABundle = []byte{}
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GroupPriorityMinimum", wireType)
+ }
+ m.GroupPriorityMinimum = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.GroupPriorityMinimum |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VersionPriority", wireType)
+ }
+ m.VersionPriority = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.VersionPriority |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *APIServiceStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APIServiceStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APIServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, APIServiceCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Port = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto
new file mode 100644
index 0000000000..938039f4d8
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto
@@ -0,0 +1,151 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1";
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+message APIService {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec contains information for locating and communicating with a server
+ optional APIServiceSpec spec = 2;
+
+ // Status contains derived information about an API server
+ optional APIServiceStatus status = 3;
+}
+
+// APIServiceCondition describes the state of an APIService at a particular point
+message APIServiceCondition {
+ // Type is the type of the condition.
+ optional string type = 1;
+
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ optional string status = 2;
+
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ optional string reason = 4;
+
+ // Human-readable message indicating details about last transition.
+ // +optional
+ optional string message = 5;
+}
+
+// APIServiceList is a list of APIService objects.
+message APIServiceList {
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of APIService
+ repeated APIService items = 2;
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+message APIServiceSpec {
+ // Service is a reference to the service for this API server. It must communicate
+ // on port 443.
+ // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+ // The call will simply delegate to the normal handler chain to be fulfilled.
+ // +optional
+ optional ServiceReference service = 1;
+
+ // Group is the API group name this server hosts
+ optional string group = 2;
+
+ // Version is the API version this server hosts. For example, "v1"
+ optional string version = 3;
+
+ // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+ // This is strongly discouraged. You should use the CABundle instead.
+ optional bool insecureSkipTLSVerify = 4;
+
+ // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ // +listType=atomic
+ // +optional
+ optional bytes caBundle = 5;
+
+ // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+ // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+ // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+ // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+ // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+ // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+ optional int32 groupPriorityMinimum = 7;
+
+ // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+ // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+ // Since it's inside of a group, the number can be small, probably in the 10s.
+ // In case of equal version priorities, the version string will be used to compute the order inside a group.
+ // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+ // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+ // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+ // version, then minor version. An example sorted list of versions:
+ // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+ optional int32 versionPriority = 8;
+}
+
+// APIServiceStatus contains derived information about an API server
+message APIServiceStatus {
+ // Current service state of apiService.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ repeated APIServiceCondition conditions = 1;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+ // Namespace is the namespace of the service
+ optional string namespace = 1;
+
+ // Name is the name of the service
+ optional string name = 2;
+
+ // If specified, the port on the service that hosting webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ // +optional
+ optional int32 port = 3;
+}
+
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go
new file mode 100644
index 0000000000..baa179571f
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the API group for apiregistration
+const GroupName = "apiregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &APIService{},
+ &APIServiceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go
new file mode 100644
index 0000000000..83fb8445f1
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.7
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// +k8s:prerelease-lifecycle-gen:replacement=apiregistration.k8s.io,v1,APIServiceList
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of APIService
+ Items []APIService `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+ // Namespace is the namespace of the service
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+ // Name is the name of the service
+ Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+ // If specified, the port on the service that hosting webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ // +optional
+ Port *int32 `json:"port,omitempty" protobuf:"varint,3,opt,name=port"`
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+ // Service is a reference to the service for this API server. It must communicate
+ // on port 443.
+ // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+ // The call will simply delegate to the normal handler chain to be fulfilled.
+ // +optional
+ Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+ // Group is the API group name this server hosts
+ Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+ // Version is the API version this server hosts. For example, "v1"
+ Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+
+ // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+ // This is strongly discouraged. You should use the CABundle instead.
+ InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty" protobuf:"varint,4,opt,name=insecureSkipTLSVerify"`
+ // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ // +listType=atomic
+ // +optional
+ CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"`
+
+ // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+ // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+ // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+ // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+ // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+ // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+ GroupPriorityMinimum int32 `json:"groupPriorityMinimum" protobuf:"varint,7,opt,name=groupPriorityMinimum"`
+
+ // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+ // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+ // Since it's inside of a group, the number can be small, probably in the 10s.
+ // In case of equal version priorities, the version string will be used to compute the order inside a group.
+ // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+ // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+ // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+ // version, then minor version. An example sorted list of versions:
+ // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+ VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"`
+
+ // leaving this here so everyone remembers why proto index 6 is skipped
+ // Priority int64 `json:"priority" protobuf:"varint,6,opt,name=priority"`
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+ // Available indicates that the service exists and is reachable
+ Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes the state of an APIService at a particular point
+type APIServiceCondition struct {
+ // Type is the type of the condition.
+ Type APIServiceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=APIServiceConditionType"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+ // Current service state of apiService.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []APIServiceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.7
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// +k8s:prerelease-lifecycle-gen:replacement=apiregistration.k8s.io,v1,APIService
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+type APIService struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec contains information for locating and communicating with a server
+ Spec APIServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // Status contains derived information about an API server
+ Status APIServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..665b959f71
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,299 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ unsafe "unsafe"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*APIService)(nil), (*apiregistration.APIService)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_APIService_To_apiregistration_APIService(a.(*APIService), b.(*apiregistration.APIService), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIService)(nil), (*APIService)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIService_To_v1beta1_APIService(a.(*apiregistration.APIService), b.(*APIService), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceCondition)(nil), (*apiregistration.APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(a.(*APIServiceCondition), b.(*apiregistration.APIServiceCondition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceCondition)(nil), (*APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(a.(*apiregistration.APIServiceCondition), b.(*APIServiceCondition), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceList)(nil), (*apiregistration.APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(a.(*APIServiceList), b.(*apiregistration.APIServiceList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceList)(nil), (*APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(a.(*apiregistration.APIServiceList), b.(*APIServiceList), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceSpec)(nil), (*apiregistration.APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(a.(*APIServiceSpec), b.(*apiregistration.APIServiceSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceSpec)(nil), (*APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(a.(*apiregistration.APIServiceSpec), b.(*APIServiceSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*APIServiceStatus)(nil), (*apiregistration.APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(a.(*APIServiceStatus), b.(*apiregistration.APIServiceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceStatus)(nil), (*APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(a.(*apiregistration.APIServiceStatus), b.(*APIServiceStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(a.(*ServiceReference), b.(*apiregistration.ServiceReference), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*apiregistration.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(a.(*apiregistration.ServiceReference), b.(*ServiceReference), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1beta1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_APIService_To_apiregistration_APIService is an autogenerated conversion function.
+func Convert_v1beta1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIService_To_apiregistration_APIService(in, out, s)
+}
+
+func autoConvert_apiregistration_APIService_To_v1beta1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error {
+ out.ObjectMeta = in.ObjectMeta
+ if err := Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ if err := Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(&in.Status, &out.Status, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_apiregistration_APIService_To_v1beta1_APIService is an autogenerated conversion function.
+func Convert_apiregistration_APIService_To_v1beta1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIService_To_v1beta1_APIService(in, out, s)
+}
+
+func autoConvert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error {
+ out.Type = apiregistration.APIServiceConditionType(in.Type)
+ out.Status = apiregistration.ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+// Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition is an autogenerated conversion function.
+func Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error {
+ out.Type = APIServiceConditionType(in.Type)
+ out.Status = ConditionStatus(in.Status)
+ out.LastTransitionTime = in.LastTransitionTime
+ out.Reason = in.Reason
+ out.Message = in.Message
+ return nil
+}
+
+// Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in, out, s)
+}
+
+func autoConvert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]apiregistration.APIService, len(*in))
+ for i := range *in {
+ if err := Convert_v1beta1_APIService_To_apiregistration_APIService(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList is an autogenerated conversion function.
+func Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error {
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIService, len(*in))
+ for i := range *in {
+ if err := Convert_apiregistration_APIService_To_v1beta1_APIService(&(*in)[i], &(*out)[i], s); err != nil {
+ return err
+ }
+ }
+ } else {
+ out.Items = nil
+ }
+ return nil
+}
+
+// Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in, out, s)
+}
+
+func autoConvert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error {
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(apiregistration.ServiceReference)
+ if err := Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Service = nil
+ }
+ out.Group = in.Group
+ out.Version = in.Version
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+ out.GroupPriorityMinimum = in.GroupPriorityMinimum
+ out.VersionPriority = in.VersionPriority
+ return nil
+}
+
+// Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec is an autogenerated conversion function.
+func Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error {
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(ServiceReference)
+ if err := Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Service = nil
+ }
+ out.Group = in.Group
+ out.Version = in.Version
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle))
+ out.GroupPriorityMinimum = in.GroupPriorityMinimum
+ out.VersionPriority = in.VersionPriority
+ return nil
+}
+
+// Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]apiregistration.APIServiceCondition)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus is an autogenerated conversion function.
+func Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error {
+ return autoConvert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in, out, s)
+}
+
+func autoConvert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error {
+ out.Conditions = *(*[]APIServiceCondition)(unsafe.Pointer(&in.Conditions))
+ return nil
+}
+
+// Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus is an autogenerated conversion function.
+func Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error {
+ return autoConvert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in, out, s)
+}
+
+func autoConvert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference is an autogenerated conversion function.
+func Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error {
+ return autoConvert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in, out, s)
+}
+
+func autoConvert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+ out.Namespace = in.Namespace
+ out.Name = in.Name
+ if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function.
+func Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error {
+ return autoConvert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in, out, s)
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..989688e9fd
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,174 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIService) DeepCopyInto(out *APIService) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService.
+func (in *APIService) DeepCopy() *APIService {
+ if in == nil {
+ return nil
+ }
+ out := new(APIService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIService) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition.
+func (in *APIServiceCondition) DeepCopy() *APIServiceCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceList) DeepCopyInto(out *APIServiceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIService, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList.
+func (in *APIServiceList) DeepCopy() *APIServiceList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServiceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) {
+ *out = *in
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(ServiceReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec.
+func (in *APIServiceSpec) DeepCopy() *APIServiceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]APIServiceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus.
+func (in *APIServiceStatus) DeepCopy() *APIServiceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+ *out = *in
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000000..034247c30c
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,48 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&APIService{}, func(obj interface{}) { SetObjectDefaults_APIService(obj.(*APIService)) })
+ scheme.AddTypeDefaultingFunc(&APIServiceList{}, func(obj interface{}) { SetObjectDefaults_APIServiceList(obj.(*APIServiceList)) })
+ return nil
+}
+
+func SetObjectDefaults_APIService(in *APIService) {
+ if in.Spec.Service != nil {
+ SetDefaults_ServiceReference(in.Spec.Service)
+ }
+}
+
+func SetObjectDefaults_APIServiceList(in *APIServiceList) {
+ for i := range in.Items {
+ a := &in.Items[i]
+ SetObjectDefaults_APIService(a)
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 0000000000..e299447181
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,74 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *APIService) APILifecycleIntroduced() (major, minor int) {
+ return 1, 7
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *APIService) APILifecycleDeprecated() (major, minor int) {
+ return 1, 19
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *APIService) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "apiregistration.k8s.io", Version: "v1", Kind: "APIService"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *APIService) APILifecycleRemoved() (major, minor int) {
+ return 1, 22
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *APIServiceList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 7
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *APIServiceList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 19
+}
+
+// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go.
+func (in *APIServiceList) APILifecycleReplacement() schema.GroupVersionKind {
+ return schema.GroupVersionKind{Group: "apiregistration.k8s.io", Version: "v1", Kind: "APIServiceList"}
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *APIServiceList) APILifecycleRemoved() (major, minor int) {
+ return 1, 22
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..45d0347c02
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go
@@ -0,0 +1,221 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package apiregistration
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIService) DeepCopyInto(out *APIService) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService.
+func (in *APIService) DeepCopy() *APIService {
+ if in == nil {
+ return nil
+ }
+ out := new(APIService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIService) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) {
+ *out = *in
+ in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition.
+func (in *APIServiceCondition) DeepCopy() *APIServiceCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceList) DeepCopyInto(out *APIServiceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]APIService, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList.
+func (in *APIServiceList) DeepCopy() *APIServiceList {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIServiceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) {
+ *out = *in
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(ServiceReference)
+ **out = **in
+ }
+ if in.CABundle != nil {
+ in, out := &in.CABundle, &out.CABundle
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec.
+func (in *APIServiceSpec) DeepCopy() *APIServiceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]APIServiceCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus.
+func (in *APIServiceStatus) DeepCopy() *APIServiceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(APIServiceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ByGroupPriorityMinimum) DeepCopyInto(out *ByGroupPriorityMinimum) {
+ {
+ in := &in
+ *out = make(ByGroupPriorityMinimum, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(APIService)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByGroupPriorityMinimum.
+func (in ByGroupPriorityMinimum) DeepCopy() ByGroupPriorityMinimum {
+ if in == nil {
+ return nil
+ }
+ out := new(ByGroupPriorityMinimum)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ByVersionPriority) DeepCopyInto(out *ByVersionPriority) {
+ {
+ in := &in
+ *out = make(ByVersionPriority, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(APIService)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByVersionPriority.
+func (in ByVersionPriority) DeepCopy() ByVersionPriority {
+ if in == nil {
+ return nil
+ }
+ out := new(ByVersionPriority)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
+func (in *ServiceReference) DeepCopy() *ServiceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go
new file mode 100644
index 0000000000..7dc3756168
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go
new file mode 100644
index 0000000000..c60532dd7c
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ apiregistrationv1.AddToScheme,
+ apiregistrationv1beta1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go
new file mode 100644
index 0000000000..7e1e008fc3
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go
@@ -0,0 +1,101 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ http "net/http"
+
+ rest "k8s.io/client-go/rest"
+ apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+)
+
+type ApiregistrationV1Interface interface {
+ RESTClient() rest.Interface
+ APIServicesGetter
+}
+
+// ApiregistrationV1Client is used to interact with features provided by the apiregistration.k8s.io group.
+type ApiregistrationV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ApiregistrationV1Client) APIServices() APIServiceInterface {
+ return newAPIServices(c)
+}
+
+// NewForConfig creates a new ApiregistrationV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ApiregistrationV1Client, error) {
+ config := *c
+ setConfigDefaults(&config)
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ApiregistrationV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiregistrationV1Client, error) {
+ config := *c
+ setConfigDefaults(&config)
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ApiregistrationV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ApiregistrationV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ApiregistrationV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ApiregistrationV1Client for the given RESTClient.
+func New(c rest.Interface) *ApiregistrationV1Client {
+ return &ApiregistrationV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) {
+ gv := apiregistrationv1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ApiregistrationV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go
new file mode 100644
index 0000000000..a27b9848d8
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ context "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ gentype "k8s.io/client-go/gentype"
+ apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+)
+
+// APIServicesGetter has a method to return a APIServiceInterface.
+// A group's client should implement this interface.
+type APIServicesGetter interface {
+ APIServices() APIServiceInterface
+}
+
+// APIServiceInterface has methods to work with APIService resources.
+type APIServiceInterface interface {
+ Create(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.CreateOptions) (*apiregistrationv1.APIService, error)
+ Update(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.UpdateOptions) (*apiregistrationv1.APIService, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, aPIService *apiregistrationv1.APIService, opts metav1.UpdateOptions) (*apiregistrationv1.APIService, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiregistrationv1.APIService, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*apiregistrationv1.APIServiceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiregistrationv1.APIService, err error)
+ APIServiceExpansion
+}
+
+// aPIServices implements APIServiceInterface
+type aPIServices struct {
+ *gentype.ClientWithList[*apiregistrationv1.APIService, *apiregistrationv1.APIServiceList]
+}
+
+// newAPIServices returns a APIServices
+func newAPIServices(c *ApiregistrationV1Client) *aPIServices {
+ return &aPIServices{
+ gentype.NewClientWithList[*apiregistrationv1.APIService, *apiregistrationv1.APIServiceList](
+ "apiservices",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ "",
+ func() *apiregistrationv1.APIService { return &apiregistrationv1.APIService{} },
+ func() *apiregistrationv1.APIServiceList { return &apiregistrationv1.APIServiceList{} },
+ gentype.PrefersProtobuf[*apiregistrationv1.APIService](),
+ ),
+ }
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go
new file mode 100644
index 0000000000..3af5d054f1
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go
new file mode 100644
index 0000000000..87aa187160
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type APIServiceExpansion interface{}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2c452fc108..e07ff1c6ff 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -510,6 +510,9 @@ github.com/emicklei/go-restful/v3/log
# github.com/emicklei/proto v1.14.2
## explicit; go 1.12
github.com/emicklei/proto
+# github.com/evanphx/json-patch v5.9.0+incompatible
+## explicit
+github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.9.11
## explicit; go 1.18
github.com/evanphx/json-patch/v5
@@ -524,6 +527,9 @@ github.com/fsnotify/fsnotify/internal
# github.com/fxamacker/cbor/v2 v2.9.0
## explicit; go 1.20
github.com/fxamacker/cbor/v2
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
# github.com/go-chi/chi/v5 v5.2.4
## explicit; go 1.22
github.com/go-chi/chi/v5
@@ -829,6 +835,9 @@ github.com/hashicorp/golang-lru/simplelru
github.com/henvic/httpretty
github.com/henvic/httpretty/internal/color
github.com/henvic/httpretty/internal/header
+# github.com/imdario/mergo v0.3.11
+## explicit; go 1.13
+github.com/imdario/mergo
# github.com/in-toto/attestation v1.1.2
## explicit; go 1.22
github.com/in-toto/attestation/go/v1
@@ -1093,14 +1102,93 @@ github.com/openshift-pipelines/pipelines-as-code/pkg/hub/vars
github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings
# github.com/openshift/api v0.0.0-20240521185306-0314f31e7774
## explicit; go 1.22.0
+github.com/openshift/api
+github.com/openshift/api/apiserver
+github.com/openshift/api/apiserver/v1
+github.com/openshift/api/apps
+github.com/openshift/api/apps/v1
+github.com/openshift/api/authorization
+github.com/openshift/api/authorization/v1
+github.com/openshift/api/build
+github.com/openshift/api/build/v1
+github.com/openshift/api/cloudnetwork
+github.com/openshift/api/cloudnetwork/v1
+github.com/openshift/api/config
+github.com/openshift/api/config/v1
+github.com/openshift/api/config/v1alpha1
+github.com/openshift/api/console
github.com/openshift/api/console/v1
+github.com/openshift/api/helm
+github.com/openshift/api/helm/v1beta1
+github.com/openshift/api/image
+github.com/openshift/api/image/docker10
+github.com/openshift/api/image/dockerpre012
+github.com/openshift/api/image/v1
+github.com/openshift/api/imageregistry
+github.com/openshift/api/imageregistry/v1
+github.com/openshift/api/kubecontrolplane
+github.com/openshift/api/kubecontrolplane/v1
+github.com/openshift/api/legacyconfig/v1
+github.com/openshift/api/machine
+github.com/openshift/api/machine/v1
+github.com/openshift/api/machine/v1alpha1
+github.com/openshift/api/machine/v1beta1
+github.com/openshift/api/monitoring
+github.com/openshift/api/monitoring/v1
+github.com/openshift/api/network
+github.com/openshift/api/network/v1
+github.com/openshift/api/network/v1alpha1
+github.com/openshift/api/networkoperator
+github.com/openshift/api/networkoperator/v1
+github.com/openshift/api/oauth
+github.com/openshift/api/oauth/v1
+github.com/openshift/api/openshiftcontrolplane
+github.com/openshift/api/openshiftcontrolplane/v1
+github.com/openshift/api/operator
+github.com/openshift/api/operator/v1
+github.com/openshift/api/operator/v1alpha1
+github.com/openshift/api/operatorcontrolplane
+github.com/openshift/api/operatorcontrolplane/v1alpha1
+github.com/openshift/api/osin
+github.com/openshift/api/osin/v1
+github.com/openshift/api/pkg/serialization
+github.com/openshift/api/project
+github.com/openshift/api/project/v1
+github.com/openshift/api/quota
+github.com/openshift/api/quota/v1
+github.com/openshift/api/route
github.com/openshift/api/route/v1
+github.com/openshift/api/samples
+github.com/openshift/api/samples/v1
+github.com/openshift/api/security
github.com/openshift/api/security/v1
+github.com/openshift/api/servicecertsigner
+github.com/openshift/api/servicecertsigner/v1alpha1
+github.com/openshift/api/sharedresource
+github.com/openshift/api/sharedresource/v1alpha1
+github.com/openshift/api/template
+github.com/openshift/api/template/v1
+github.com/openshift/api/user
+github.com/openshift/api/user/v1
# github.com/openshift/apiserver-library-go v0.0.0-20230816171015-6bfafa975bfb
## explicit; go 1.20
github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util/sort
# github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d
## explicit; go 1.22.0
+github.com/openshift/client-go/config/applyconfigurations/config/v1
+github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1
+github.com/openshift/client-go/config/applyconfigurations/internal
+github.com/openshift/client-go/config/clientset/versioned
+github.com/openshift/client-go/config/clientset/versioned/scheme
+github.com/openshift/client-go/config/clientset/versioned/typed/config/v1
+github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1
+github.com/openshift/client-go/config/informers/externalversions
+github.com/openshift/client-go/config/informers/externalversions/config
+github.com/openshift/client-go/config/informers/externalversions/config/v1
+github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1
+github.com/openshift/client-go/config/informers/externalversions/internalinterfaces
+github.com/openshift/client-go/config/listers/config/v1
+github.com/openshift/client-go/config/listers/config/v1alpha1
github.com/openshift/client-go/route/clientset/versioned/scheme
github.com/openshift/client-go/security/applyconfigurations/internal
github.com/openshift/client-go/security/applyconfigurations/security/v1
@@ -1109,6 +1197,21 @@ github.com/openshift/client-go/security/clientset/versioned/fake
github.com/openshift/client-go/security/clientset/versioned/scheme
github.com/openshift/client-go/security/clientset/versioned/typed/security/v1
github.com/openshift/client-go/security/clientset/versioned/typed/security/v1/fake
+# github.com/openshift/library-go v0.0.0-20230503173034-95ca3c14e50a
+## explicit; go 1.20
+github.com/openshift/library-go/pkg/controller/factory
+github.com/openshift/library-go/pkg/crypto
+github.com/openshift/library-go/pkg/operator/condition
+github.com/openshift/library-go/pkg/operator/configobserver
+github.com/openshift/library-go/pkg/operator/configobserver/apiserver
+github.com/openshift/library-go/pkg/operator/events
+github.com/openshift/library-go/pkg/operator/management
+github.com/openshift/library-go/pkg/operator/resource/resourceapply
+github.com/openshift/library-go/pkg/operator/resource/resourcehelper
+github.com/openshift/library-go/pkg/operator/resource/resourcemerge
+github.com/openshift/library-go/pkg/operator/resource/resourceread
+github.com/openshift/library-go/pkg/operator/resourcesynccontroller
+github.com/openshift/library-go/pkg/operator/v1helpers
# github.com/openzipkin/zipkin-go v0.4.3
## explicit; go 1.20
github.com/openzipkin/zipkin-go/model
@@ -1136,6 +1239,7 @@ github.com/pmezard/go-difflib/difflib
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
+github.com/prometheus/client_golang/prometheus/collectors
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promhttp/internal
@@ -1175,6 +1279,9 @@ github.com/rcrowley/go-metrics
# github.com/rivo/uniseg v0.4.7
## explicit; go 1.18
github.com/rivo/uniseg
+# github.com/robfig/cron v1.2.0
+## explicit
+github.com/robfig/cron
# github.com/rogpeppe/go-internal v1.14.1
## explicit; go 1.23
github.com/rogpeppe/go-internal/internal/syscall/windows
@@ -1928,6 +2035,7 @@ gotest.tools/v3/internal/source
# k8s.io/api v0.35.0 => k8s.io/api v0.32.4
## explicit; go 1.23.0
k8s.io/api/admission/v1
+k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1alpha1
k8s.io/api/admissionregistration/v1beta1
@@ -2059,6 +2167,9 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
+# k8s.io/apiserver v0.34.1
+## explicit; go 1.24.0
+k8s.io/apiserver/pkg/authentication/user
# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.32.4
## explicit; go 1.23.0
k8s.io/client-go/applyconfigurations
@@ -2430,6 +2541,12 @@ k8s.io/code-generator/cmd/register-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
k8s.io/code-generator/third_party/forked/golang/reflect
+# k8s.io/component-base v0.34.1
+## explicit; go 1.24.0
+k8s.io/component-base/metrics
+k8s.io/component-base/metrics/legacyregistry
+k8s.io/component-base/metrics/prometheusextension
+k8s.io/component-base/version
# k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d
## explicit; go 1.20
k8s.io/gengo/v2
@@ -2447,6 +2564,13 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
+# k8s.io/kube-aggregator v0.34.1
+## explicit; go 1.24.0
+k8s.io/kube-aggregator/pkg/apis/apiregistration
+k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
+k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
+k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme
+k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1
# k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e => k8s.io/kube-openapi v0.0.0-20250627150254-e9823e99808e
## explicit; go 1.22
k8s.io/kube-openapi/cmd/openapi-gen
@@ -2568,6 +2692,12 @@ sigs.k8s.io/gateway-api/apis/v1
## explicit; go 1.23
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
+# sigs.k8s.io/kube-storage-version-migrator v0.0.4
+## explicit; go 1.13
+sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1
+sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset
+sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme
+sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1
# sigs.k8s.io/randfill v1.0.0
## explicit; go 1.18
sigs.k8s.io/randfill
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go
new file mode 100644
index 0000000000..da6d19a24b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// +groupName=migration.k8s.io
+package v1alpha1
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go
new file mode 100644
index 0000000000..f400f747eb
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "migration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &StorageVersionMigration{},
+ &StorageVersionMigrationList{},
+ &StorageState{},
+ &StorageStateList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go
new file mode 100644
index 0000000000..dde42a5b37
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient:nonNamespaced
+
+// StorageVersionMigration represents a migration of stored data to the latest
+// storage version.
+type StorageVersionMigration struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Specification of the migration.
+ // +optional
+ Spec StorageVersionMigrationSpec `json:"spec,omitempty"`
+ // Status of the migration.
+ // +optional
+ Status StorageVersionMigrationStatus `json:"status,omitempty"`
+}
+
+// The names of the group, the version, and the resource.
+type GroupVersionResource struct {
+ // The name of the group.
+ Group string `json:"group,omitempty"`
+ // The name of the version.
+ Version string `json:"version,omitempty"`
+ // The name of the resource.
+ Resource string `json:"resource,omitempty"`
+}
+
+// Spec of the storage version migration.
+type StorageVersionMigrationSpec struct {
+ // The resource that is being migrated. The migrator sends requests to
+ // the endpoint serving the resource.
+ // Immutable.
+ Resource GroupVersionResource `json:"resource"`
+ // The token used in the list options to get the next chunk of objects
+ // to migrate. When the .status.conditions indicates the migration is
+ // "Running", users can use this token to check the progress of the
+ // migration.
+ // +optional
+ ContinueToken string `json:"continueToken,omitempty"`
+ // TODO: consider recording the storage version hash when the migration
+ // is created. It can avoid races.
+}
+
+type MigrationConditionType string
+
+const (
+ // Indicates that the migration is running.
+ MigrationRunning MigrationConditionType = "Running"
+ // Indicates that the migration has completed successfully.
+ MigrationSucceeded MigrationConditionType = "Succeeded"
+ // Indicates that the migration has failed.
+ MigrationFailed MigrationConditionType = "Failed"
+)
+
+// Describes the state of a migration at a certain point.
+type MigrationCondition struct {
+ // Type of the condition.
+ Type MigrationConditionType `json:"type"`
+ // Status of the condition, one of True, False, Unknown.
+ Status corev1.ConditionStatus `json:"status"`
+ // The last time this condition was updated.
+ // +optional
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty"`
+}
+
+// Status of the storage version migration.
+type StorageVersionMigrationStatus struct {
+ // The latest available observations of the migration's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []MigrationCondition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageVersionMigrationList is a collection of storage version migrations.
+type StorageVersionMigrationList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // Items is the list of StorageVersionMigration
+ Items []StorageVersionMigration `json:"items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient:nonNamespaced
+
+// The state of the storage of a specific resource.
+type StorageState struct {
+ metav1.TypeMeta `json:",inline"`
+ // The name must be "<.spec.resource.resouce>.<.spec.resource.group>".
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ // Specification of the storage state.
+ // +optional
+ Spec StorageStateSpec `json:"spec,omitempty"`
+ // Status of the storage state.
+ // +optional
+ Status StorageStateStatus `json:"status,omitempty"`
+}
+
+// The names of the group and the resource.
+type GroupResource struct {
+ // The name of the group.
+ Group string `json:"group,omitempty"`
+ // The name of the resource.
+ Resource string `json:"resource,omitempty"`
+}
+
+// Specification of the storage state.
+type StorageStateSpec struct {
+ // The resource this storageState is about.
+ Resource GroupResource `json:"resource,omitempty"`
+}
+
+// Unknown is a valid value in persistedStorageVersionHashes.
+const Unknown = "Unknown"
+
+// Status of the storage state.
+type StorageStateStatus struct {
+ // The hash values of storage versions that persisted instances of
+ // spec.resource might still be encoded in.
+ // "Unknown" is a valid value in the list, and is the default value.
+ // It is not safe to upgrade or downgrade to an apiserver binary that does not
+ // support all versions listed in this field, or if "Unknown" is listed.
+ // Once the storage version migration for this resource has completed, the
+ // value of this field is refined to only contain the
+ // currentStorageVersionHash.
+ // Once the apiserver has changed the storage version, the new storage version
+ // is appended to the list.
+ // +optional
+ PersistedStorageVersionHashes []string `json:"persistedStorageVersionHashes,omitempty"`
+ // The hash value of the current storage version, as shown in the discovery
+ // document served by the API server.
+ // Storage Version is the version to which objects are converted to
+ // before persisted.
+ // +optional
+ CurrentStorageVersionHash string `json:"currentStorageVersionHash,omitempty"`
+ // LastHeartbeatTime is the last time the storage migration triggering
+ // controller checks the storage version hash of this resource in the
+ // discovery document and updates this field.
+ // +optional
+ LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StorageStateList is a collection of storage state.
+type StorageStateList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ // Items is the list of StorageState
+ Items []StorageState `json:"items"`
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..5d6374d609
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,275 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResource) DeepCopyInto(out *GroupResource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource.
+func (in *GroupResource) DeepCopy() *GroupResource {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource.
+func (in *GroupVersionResource) DeepCopy() *GroupVersionResource {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupVersionResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition.
+func (in *MigrationCondition) DeepCopy() *MigrationCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(MigrationCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageState) DeepCopyInto(out *StorageState) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageState.
+func (in *StorageState) DeepCopy() *StorageState {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageState)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageState) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageStateList) DeepCopyInto(out *StorageStateList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StorageState, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateList.
+func (in *StorageStateList) DeepCopy() *StorageStateList {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageStateList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageStateList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageStateSpec) DeepCopyInto(out *StorageStateSpec) {
+ *out = *in
+ out.Resource = in.Resource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateSpec.
+func (in *StorageStateSpec) DeepCopy() *StorageStateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageStateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageStateStatus) DeepCopyInto(out *StorageStateStatus) {
+ *out = *in
+ if in.PersistedStorageVersionHashes != nil {
+ in, out := &in.PersistedStorageVersionHashes, &out.PersistedStorageVersionHashes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateStatus.
+func (in *StorageStateStatus) DeepCopy() *StorageStateStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageStateStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigration.
+func (in *StorageVersionMigration) DeepCopy() *StorageVersionMigration {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageVersionMigration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageVersionMigration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageVersionMigrationList) DeepCopyInto(out *StorageVersionMigrationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]StorageVersionMigration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationList.
+func (in *StorageVersionMigrationList) DeepCopy() *StorageVersionMigrationList {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageVersionMigrationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *StorageVersionMigrationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageVersionMigrationSpec) DeepCopyInto(out *StorageVersionMigrationSpec) {
+ *out = *in
+ out.Resource = in.Resource
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationSpec.
+func (in *StorageVersionMigrationSpec) DeepCopy() *StorageVersionMigrationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageVersionMigrationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrationStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]MigrationCondition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationStatus.
+func (in *StorageVersionMigrationStatus) DeepCopy() *StorageVersionMigrationStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageVersionMigrationStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go
new file mode 100644
index 0000000000..d8f4f36dd1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go
@@ -0,0 +1,97 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package clientset
+
+import (
+ "fmt"
+
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ migrationV1alpha1 *migrationv1alpha1.MigrationV1alpha1Client
+}
+
+// MigrationV1alpha1 retrieves the MigrationV1alpha1Client
+func (c *Clientset) MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface {
+ return c.migrationV1alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.migrationV1alpha1, err = migrationv1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.migrationV1alpha1 = migrationv1alpha1.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.migrationV1alpha1 = migrationv1alpha1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go
new file mode 100644
index 0000000000..ee865e56d1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package clientset
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go
new file mode 100644
index 0000000000..7dc3756168
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go
new file mode 100644
index 0000000000..93f2b6e2e7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ migrationv1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go
new file mode 100644
index 0000000000..df51baa4d4
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go
new file mode 100644
index 0000000000..3ce4f57536
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type StorageStateExpansion interface{}
+
+type StorageVersionMigrationExpansion interface{}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go
new file mode 100644
index 0000000000..097a331f01
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go
@@ -0,0 +1,94 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme"
+)
+
+type MigrationV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ StorageStatesGetter
+ StorageVersionMigrationsGetter
+}
+
+// MigrationV1alpha1Client is used to interact with features provided by the migration.k8s.io group.
+type MigrationV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *MigrationV1alpha1Client) StorageStates() StorageStateInterface {
+ return newStorageStates(c)
+}
+
+func (c *MigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface {
+ return newStorageVersionMigrations(c)
+}
+
+// NewForConfig creates a new MigrationV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*MigrationV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &MigrationV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new MigrationV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *MigrationV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new MigrationV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *MigrationV1alpha1Client {
+ return &MigrationV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *MigrationV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go
new file mode 100644
index 0000000000..8345b36199
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme"
+)
+
+// StorageStatesGetter has a method to return a StorageStateInterface.
+// A group's client should implement this interface.
+type StorageStatesGetter interface {
+ StorageStates() StorageStateInterface
+}
+
+// StorageStateInterface has methods to work with StorageState resources.
+type StorageStateInterface interface {
+ Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (*v1alpha1.StorageState, error)
+ Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error)
+ UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageState, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageStateList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error)
+ StorageStateExpansion
+}
+
+// storageStates implements StorageStateInterface
+type storageStates struct {
+ client rest.Interface
+}
+
+// newStorageStates returns a StorageStates
+func newStorageStates(c *MigrationV1alpha1Client) *storageStates {
+ return &storageStates{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the storageState, and returns the corresponding storageState object, and an error if there is any.
+func (c *storageStates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageState, err error) {
+ result = &v1alpha1.StorageState{}
+ err = c.client.Get().
+ Resource("storagestates").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of StorageStates that match those selectors.
+func (c *storageStates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageStateList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.StorageStateList{}
+ err = c.client.Get().
+ Resource("storagestates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested storageStates.
+func (c *storageStates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("storagestates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a storageState and creates it. Returns the server's representation of the storageState, and an error, if there is any.
+func (c *storageStates) Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (result *v1alpha1.StorageState, err error) {
+ result = &v1alpha1.StorageState{}
+ err = c.client.Post().
+ Resource("storagestates").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageState).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a storageState and updates it. Returns the server's representation of the storageState, and an error, if there is any.
+func (c *storageStates) Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) {
+ result = &v1alpha1.StorageState{}
+ err = c.client.Put().
+ Resource("storagestates").
+ Name(storageState.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageState).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *storageStates) UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) {
+ result = &v1alpha1.StorageState{}
+ err = c.client.Put().
+ Resource("storagestates").
+ Name(storageState.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageState).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the storageState and deletes it. Returns an error if one occurs.
+func (c *storageStates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("storagestates").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *storageStates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("storagestates").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched storageState.
+func (c *storageStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error) {
+ result = &v1alpha1.StorageState{}
+ err = c.client.Patch(pt).
+ Resource("storagestates").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go
new file mode 100644
index 0000000000..34fa3a987e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go
@@ -0,0 +1,184 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+ scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme"
+)
+
+// StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface.
+// A group's client should implement this interface.
+type StorageVersionMigrationsGetter interface {
+ StorageVersionMigrations() StorageVersionMigrationInterface
+}
+
+// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources.
+type StorageVersionMigrationInterface interface {
+ Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error)
+ Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
+ UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error)
+ StorageVersionMigrationExpansion
+}
+
+// storageVersionMigrations implements StorageVersionMigrationInterface
+type storageVersionMigrations struct {
+ client rest.Interface
+}
+
+// newStorageVersionMigrations returns a StorageVersionMigrations
+func newStorageVersionMigrations(c *MigrationV1alpha1Client) *storageVersionMigrations {
+ return &storageVersionMigrations{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any.
+func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) {
+ result = &v1alpha1.StorageVersionMigration{}
+ err = c.client.Get().
+ Resource("storageversionmigrations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors.
+func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.StorageVersionMigrationList{}
+ err = c.client.Get().
+ Resource("storageversionmigrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested storageVersionMigrations.
+func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("storageversionmigrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any.
+func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
+ result = &v1alpha1.StorageVersionMigration{}
+ err = c.client.Post().
+ Resource("storageversionmigrations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageVersionMigration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any.
+func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
+ result = &v1alpha1.StorageVersionMigration{}
+ err = c.client.Put().
+ Resource("storageversionmigrations").
+ Name(storageVersionMigration.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageVersionMigration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) {
+ result = &v1alpha1.StorageVersionMigration{}
+ err = c.client.Put().
+ Resource("storageversionmigrations").
+ Name(storageVersionMigration.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(storageVersionMigration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs.
+func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("storageversionmigrations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("storageversionmigrations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched storageVersionMigration.
+func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) {
+ result = &v1alpha1.StorageVersionMigration{}
+ err = c.client.Patch(pt).
+ Resource("storageversionmigrations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}